| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
geometalab/G4SE-Compass | compass-api/G4SE/api/migrations/0010_NO_WAY_BACK_drop_obsolete_tables_20161012_0747.py | Python | mit | 1,451 | 0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-12 07:47
from __future__ import unicode_literals
from django.db import migrations
drop_harvested_records = [
"DROP TRIGGER harvested_tsvectorupdate_de ON harvested_r
|
ecords;",
"DROP TRIGGER harvested_tsvectorupdate_en ON harvested_records;",
"DROP TRIGGER harvested_tsvectorupdate_fr ON harvested_records",
"DROP TABLE harvested_records;",
]
drop_records = [
"DROP TRIGGER tsvectorupdate_en ON records;",
"DROP TRIGGER tsvectorupdate_de ON records;",
"DROP TRIGGER tsvectorupdate_fr ON records;",
"DROP TABLE records;",
]
drop_trigger_functions = [
"DROP FUNCTION records_
|
trigger_de();",
"DROP FUNCTION records_trigger_en();",
"DROP FUNCTION records_trigger_fr();",
"DROP FUNCTION harvested_records_trigger_de();",
"DROP FUNCTION harvested_records_trigger_en();",
"DROP FUNCTION harvested_records_trigger_fr();",
]
class Migration(migrations.Migration):
dependencies = [
('api', '0009_drop_obsolete_view_20161012_0726'),
]
operations = [
migrations.RunSQL(drop_harvested_records),
migrations.RunSQL(drop_records),
migrations.RunSQL(drop_trigger_functions),
migrations.DeleteModel(name='RecordTaggedItem'),
migrations.DeleteModel(name='CombinedRecord'),
migrations.DeleteModel(name='Record'),
migrations.DeleteModel(name='HarvestedRecord'),
]
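A note on the operations above: RunSQL is irreversible unless reverse_sql is supplied, which is presumably why this migration is named "NO_WAY_BACK". A minimal sketch of how a reversible variant could be declared, assuming a no-op reverse is acceptable (illustrative only, not part of the original file):

from django.db import migrations

# Hypothetical reversible counterpart: the forward SQL still drops the table, but the
# reverse step is declared a no-op so the migration can be unapplied without error.
reversible_drop = migrations.RunSQL(
    sql=["DROP TABLE records;"],
    reverse_sql=migrations.RunSQL.noop,
)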

kelle/astropy | astropy/coordinates/tests/test_funcs.py | Python | bsd-3-clause | 2,515 | 0.001194
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for miscellaneous functionality in the `funcs` module
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
import numpy as np
from numpy import testing as npt
from ...extern import six
from ... import units as u
from ...time import Time
def test_sun():
"""
Test that `get_sun` works and it behaves roughly as it should (in GCRS)
"""
from ..funcs import get_sun
northern_summer_solstice = Time('2010-6-21')
    northern_winter_solstice = Time('2010-12-21')
equinox_1 = Time('2010-3-21')
equinox_2 = Time('2010-9-21')
gcrs1 = get_sun(equinox_1)
assert np.abs(gcrs1.dec.deg) < 1
gcrs2 = get_sun(Time([northern_summer_solstice, equinox_2, northern_winter_solstice]))
    assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5]*u.deg) < 1*u.deg)
def test_concatenate():
from .. import FK5, SkyCoord
from ..funcs import concatenate
fk5 = FK5(1*u.deg, 2*u.deg)
sc = SkyCoord(3*u.deg, 4*u.deg, frame='fk5')
res = concatenate([fk5, sc])
np.testing.assert_allclose(res.ra, [1, 3]*u.deg)
np.testing.assert_allclose(res.dec, [2, 4]*u.deg)
with pytest.raises(TypeError):
concatenate(fk5)
with pytest.raises(TypeError):
concatenate(1*u.deg)
def test_constellations():
from .. import ICRS, FK5, SkyCoord
from ..funcs import get_constellation
inuma = ICRS(9*u.hour, 65*u.deg)
res = get_constellation(inuma)
res_short = get_constellation(inuma, short_name=True)
assert res == 'Ursa Major'
assert res_short == 'UMa'
assert isinstance(res, six.string_types) or getattr(res, 'shape', None) == tuple()
# these are taken from the ReadMe for Roman 1987
ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]
decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]
shortnames = ['UMa', 'Aqr', 'Ori', 'Hya', 'Com', 'Lib', 'CrA', 'Men']
testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1950')
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)
# test on a SkyCoord, *and* test Boötes, which is special in that it has a
# non-ASCII character
bootest = SkyCoord(15*u.hour, 30*u.deg, frame='icrs')
boores = get_constellation(bootest)
assert boores == u'Boötes'
assert isinstance(boores, six.string_types) or getattr(boores, 'shape', None) == tuple()

obi-two/Rebelion | data/scripts/templates/object/tangible/mission/quest_item/shared_rakir_banai_q2_needed.py | Python | mit | 477 | 0.046122
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_rakir_banai_q2_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_tato_n","rakir_banai_q2_needed")
#### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
return result

vrenkens/Nabu-asr | nabu/processing/tfwriters/alignment_writer.py | Python | mit | 651 | 0.003072
'''@file alignment_writer.py
contains the AlignmentWriter class'''
import numpy as np
import tensorflow as tf
import tfwriter
class AlignmentWriter(tfwriter.TfWriter):
'''a TfWriter to write kaldi alignments'''
def _get_example(self, data):
'''write data to a file
Args:
data: the data to be written'''
data_feature = tf.train.Feature(bytes_list=tf.train.BytesList(
value=[data.reshape([-1]).astype(np.int32).tostring()]))
#create the example proto
        example = tf.train.Example(features=tf.train.Features(feature={
'data': data_feature}))
return example
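As a point of reference, a record written by _get_example above could be read back with the TensorFlow 1.x parsing ops roughly as follows; the feature name 'data' and the int32 dtype come from the writer, everything else here is an assumption for illustration:

import tensorflow as tf

def parse_alignment(serialized_example):
    """Decode one serialized Example produced by AlignmentWriter (illustrative sketch)."""
    features = tf.parse_single_example(
        serialized_example,
        features={'data': tf.FixedLenFeature([], tf.string)})
    # the writer stored the alignment as raw int32 bytes, so decode it the same way
    return tf.decode_raw(features['data'], tf.int32)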

DjangoAdminHackers/ixxy-admin-utils | ixxy_admin_utils/admin_mixins.py | Python | mit | 6,862 | 0.005683
from django.http import HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from .custom_fields import (
BooleanTimeStampField,
BooleanTimeStampFormField,
BooleanTimeStampWidget,
)
try:
from dal_select2.widgets import ModelSelect2Multiple
except ImportError: # Avoid ImportError in the absence of django-autocomplete-light
ModelSelect2Multiple = None
class RedirectableAdmin(object):
"""If you use this as a mixin to your ModelAdmin then the change and add forms will accept
    a url parameter '_redirect' and redirect to that on save"""
def response_post_save_change(self, request, obj):
if '_redirect' in request.GET:
return HttpResponseRedirect(request.GET['_redirect'])
else:
return super(RedirectableAdmin, self).response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
        if '_redirect' in request.GET:
            return HttpResponseRedirect(request.GET['_redirect'])
else:
return super(RedirectableAdmin, self).response_post_save_add(request, obj)
def delete_view(self, request, object_id, extra_context=None):
response = super(RedirectableAdmin, self).delete_view(request, object_id, extra_context)
if '_redirect' in request.GET and response.status_code == 302:
return HttpResponseRedirect(request.GET['_redirect'])
else:
return response
class ModifyRelatedObjectAdmin(RedirectableAdmin):
"""If you use this as a mixin to your ModelAdmin then the change form will accept
_redirect same as with RedirectableAdmin. Additionally add forms
    will also accept parameters to identify a parent object and field which will
be set to the newly created object before redirecting"""
def response_post_save_add(self, request, obj):
if '_related_object' in request.GET:
app_label, model_name, object_id, field_name = request.GET['_related_object'].split(' ')
content_type = ContentType.objects.get_by_natural_key(app_label, model_name)
related_object = content_type.get_object_for_this_type(pk=object_id)
setattr(related_object, field_name, obj)
related_object.save()
return super(ModifyRelatedObjectAdmin, self).response_post_save_add(request, obj)
class HideAddRelatedMixin(object):
"""ModelAdmin mixin that disables the green 'add related object' plus icon
for any fields listed in hide_add_related_fields
Usage: hide_add_related_fields = ['user', 'group']
    Alternatively, if there is a property 'show_add_related_fields' then this works as a whitelist"""
def get_form(self, request, obj=None, **kwargs):
form = super(HideAddRelatedMixin, self).get_form(request, obj, **kwargs)
if getattr(self, 'show_add_related_fields', None) is not None:
for field in form.base_fields.keys():
if field not in self.show_add_related_fields:
form.base_fields[field].widget.can_add_related = False
else:
for field in getattr(self, 'hide_add_related_fields', []):
form.base_fields[field].widget.can_add_related = False
return form
class DisableDeletionMixin(object):
def has_delete_permission(self, request, obj=None):
return False
class LongListFilterMixin(object):
"""Automatically reduce the amount of space taken up by very long filters.
It hides the list of options and replaces it with an input field that autocompletes.
Unlike a true autocomplete this won't save queries or speed up page load
but it's a quick and dirty improvement to the UI"""
@property
def media(self):
cdn_base = 'https://ajax.googleapis.com/ajax/libs/'
show = getattr(self, 'long_list_filter_show', 'active')
threshold = getattr(self, 'long_list_filter_threshold', '300')
height = getattr(self, 'long_list_filter_height', '100')
media = super(LongListFilterMixin, self).media
media.add_js([
'{}jqueryui/1.11.4/jquery-ui.min.js'.format(cdn_base),
'js/ixxy_admin_utils/long_list_filter.js?show={}&threshold={}&height={}'.format(
show,
threshold,
height,
),
])
media.add_css({
'all': [
'{}jqueryui/1.11.4/themes/smoothness/jquery-ui.css'.format(cdn_base)
]
})
return media
class AutocompleteMixin(object):
"""Reduces the amount of boilerplate needed by autocomplete-light.
Define a property on your ModelAdmin called 'autocomplete_widgets'.
This is a dict mapping field names to Autocomplete fields:
autocomplete_widgets = {
'contact': autocomplete.ModelSelect2(url='contact-autocomplete'),
'event': autocomplete.ModelSelect2(url='event-autocomplete'),
'team': autocomplete.ModelSelect2(url='team-autocomplete', forward=['event']),
}
"""
def formfield_for_dbfield(self, db_field, **kwargs):
# Automatically assign autocomplete widgets based on an autocomplete_widgets dict
if db_field.name in getattr(self, 'autocomplete_widgets', {}):
kwargs['widget'] = self.autocomplete_widgets[db_field.name]
return super(AutocompleteMixin, self).formfield_for_dbfield(db_field, **kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
# Remove the hardcoded m2m help_text if the widget is ModelSelect2Multiple
form_field = super(AutocompleteMixin, self).formfield_for_manytomany(
db_field,
request,
**kwargs
)
        # ModelSelect2Multiple is None when django-autocomplete-light is not installed
        if ModelSelect2Multiple is not None and isinstance(form_field.widget, ModelSelect2Multiple):
unwanted_msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
form_field.help_text = form_field.help_text.replace(unwanted_msg, '')
return form_field
class BooleanTimeStampMixin(object):
"""If you this with any model containing BooleanTimeStampField
then flipping the checkbox to 'on' will set the datetime to timezone.now()
The widget will be a checkbox with the stored datetime as label"""
def formfield_for_dbfield(self, db_field, **kwargs):
if isinstance(db_field, BooleanTimeStampField):
kwargs['form_class'] = BooleanTimeStampFormField
kwargs['widget'] = BooleanTimeStampWidget(label=db_field.verbose_name.title())
kwargs['label'] = ''
kwargs.pop('request')
db_field.formfield(**kwargs)
return super(BooleanTimeStampMixin, self).formfield_for_dbfield(db_field, **kwargs)
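To illustrate how these mixins are meant to be combined on a concrete ModelAdmin (the Article model, field names and autocomplete URL below are hypothetical, not part of this package):

from django.contrib import admin
from dal import autocomplete  # assumed installed when AutocompleteMixin is used

from ixxy_admin_utils.admin_mixins import (
    AutocompleteMixin,
    HideAddRelatedMixin,
    RedirectableAdmin,
)
from myapp.models import Article  # hypothetical model


@admin.register(Article)
class ArticleAdmin(AutocompleteMixin, HideAddRelatedMixin, RedirectableAdmin,
                   admin.ModelAdmin):
    # hide the green "+" icon next to these foreign keys
    hide_add_related_fields = ['author', 'category']
    # assign an autocomplete widget without hand-writing a form
    autocomplete_widgets = {
        'author': autocomplete.ModelSelect2(url='author-autocomplete'),
    }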

rajarahulray/iDetector | tests_and_ References/table_view_text_box.py | Python | mit | 1,160 | 0.010345
import tkinter as tk
class ExampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
t = SimpleTable(self, 10,2)
t.pack(side="top", fill="x")
t.set(0,0,"Hello, world")
class SimpleTable(tk.Frame):
def __init__(self, parent, rows=10, columns=2):
# use black background so it "peeks through" to
# form grid lines
tk.Frame.__init__(self, parent, background="black")
        self._widgets = []
for row in range(rows):
current_row = []
for column in range(columns):
label = tk.Label(self, text="%s/%s" % (row, column),
borderwidth=0, width=10, height = 10)
                label.grid(row=row, column=column, sticky="nsew", padx=1, pady=1)
current_row.append(label)
self._widgets.append(current_row)
for column in range(columns):
self.grid_columnconfigure(column, weight=1)
def set(self, row, column, value):
widget = self._widgets[row][column]
widget.configure(text=value)
if __name__ == "__main__":
app = ExampleApp()
app.mainloop()
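A small usage sketch for SimpleTable beyond the single cell set in ExampleApp (the data below is made up for illustration):

def fill_table(table, rows):
    """Populate a SimpleTable from a list of row tuples (illustrative helper)."""
    for r, row in enumerate(rows):
        for c, value in enumerate(row):
            table.set(r, c, value)

# e.g. fill_table(t, [("name", "score"), ("alice", 10), ("bob", 7)])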

dgbc/django-arctic | example/example/settings.py | Python | mit | 3,469 | 0.000288
"""
Django settings for example project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from .arctic import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Path helper
location = lambda x: os.path.join(
os.path.dirname(os.path.realpath(__file__)), x)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(u$wqr4t^)-7)&3hc(o49-svh-em-y4$!u9kr99gavo(1pipdu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
INTERNAL_IPS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'dashboard',
'articles',
'arctic',
'arctic.users',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'nl-nl'
TIME_ZONE = 'Europe/Amsterdam'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = location("static")
STATIC_URL = '/static/'
MEDIA_ROOT = location("media")
MEDIA_URL = '/media/'
LOGIN_URL = LOGOUT_URL = 'login'
try:
import debug_toolbar
MIDDLEWARE_CLASSES.append(
'debug_toolbar.middleware.DebugToolbarMiddleware')
INSTALLED_APPS.append('debug_toolbar')
except ImportError:
pass

tianz/MyInventory | manage.py | Python | mit | 254 | 0
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myinventory.settings")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)

jcartledge/sublime-worksheet | repl/pexpect.py | Python | mit | 79,154 | 0.004321
"""Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo myname@host.example.com:.')
child.expect ('Password:')
child.sendline (mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, Shane Kerr and Thomas Kluyver. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Pexpect Copyright (c) 2010 Noah Spurrier
http://pexpect.sourceforge.net/
"""
try:
import os, sys, time
import select
import re
import struct
import types
import errno
import traceback
import signal
except ImportError as e:
raise ImportError (str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
try:
# Linux ST2 doesn't package the resource module so ... uh ...
import resource
except:
pass
try:
import pty
import tty
import termios
import fcntl
except ImportError:
pass
try:
# Linux ST2 doesn't package the resource module so ... uh ...
import resource
except:
pass
__version__ = '2.5.1'
version = __version__
version_info = (2,5,1)
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnb', 'run', 'which',
'split_command_line', '__version__']
PY3K = sys.version_info >= (3, 0, 0)
if PY3K:
def u(s):
return s
string_types = str,
text_type = str
binary_type = bytes
Iterator = object
else:
def u(s):
return unicode(s, 'unicode_escape')
string_types = basestring,
text_type = unicode
binary_type = str
class Iterator(object):
def next(self):
return type(self).__next__(self)
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a scan buffer fills before matching an expected pattern."""
def _cast_bytes(s, enc):
if isinstance(s, string_types):
return s.encode(enc)
return s
def _cast_unicode(s, enc):
if isinstance(s, binary_type):
return s.decode(enc)
return s
re_type = type(re.compile(''))
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None,
logfile=None, cwd=None, env=None, encoding='utf-8'):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudo ttys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
    command_output.
    The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo myname@host.example.com:.')
child.expect ('(?i)password')
child.sendline (mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run ('scp foo myname@host.example.com:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run ("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run ("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and e

nickgaya/python2 | python2/client/client.py | Python | mit | 2,862 | 0
# TODO: Logging
import contextlib
import json
import logging
import weakref
from python2.client.codec import ClientCodec
from python2.client.exceptions import Py2Error
from python2.client.object import Py2Object
SPECIAL_EXCEPTION_TYPES = {t.__name__: t for t in (StopIteration, TypeError)}
logger = logging.getLogger(__name__)
class Py2Client:
"""
Python 2 internal client.
This class is used to send commands to a Python 2 process and unpack the
responses.
"""
def __init__(self, infile, outfile):
self.infile = infile
self.outfile = outfile
self.objects = weakref.WeakValueDictionary()
self.codec = ClientCodec(self)
def get_object(self, oid):
""" Get the Py2Object with the given object id, or None. """
return self.objects.get(oid)
def create_object(self, oid):
""" Create a Py2Object with the given object id. """
obj = Py2Object(self, oid)
self.objects[oid] = obj
return obj
def _send(self, data):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Sending: {!r}".format(data))
self.outfile.write(json.dumps(data).encode())
self.outfile.write(b'\n')
self.outfile.flush()
def _receive(self):
data = json.loads(self.infile.readline().decode())
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Received: {!r}".format(data))
return data
def encode_command(self, command, *args):
session = self.codec.encoding_session()
return dict(command=command,
args=[session.encode(arg) for arg in args])
def decode_result(self, data):
if data['result'] == 'return':
return self.codec.decode(data['value'])
        elif data['result'] == 'raise':
exception_type = Py2Error
if data['types']:
# Dynamically generate Py2Error subclass with relevant base
# types. This is a hack to allow iterators to work correctly.
bases = [Py2Error]
bases.extend(SPECIAL_EXCEPTION_TYPES[tname]
                             for tname in data['types'])
                exception_type = type('Py2Error~', tuple(bases), {})
raise exception_type(
self.codec.decode(data['message']),
exception=self.codec.decode(data['exception'])
)
else:
raise Exception("Invalid server response: result={!r}".format(
data['result']))
def do_command(self, command, *args):
self._send(self.encode_command(command, *args))
return self.decode_result(self._receive())
def close(self):
with contextlib.ExitStack() as stack:
stack.callback(self.infile.close)
stack.callback(self.outfile.close)

scailer/picarchive | apps/notice/models.py | Python | mit | 582 | 0
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Notice(models.Model):
    text = models.TextField(_(u'Заметка'))  # 'Note'
    user = models.ForeignKey('account.User', verbose_name=_(u'Пользователь'))  # 'User'
    picture = models.ForeignKey('picture.Picture', verbose_name=_(u'Картинка'))  # 'Picture'
    creation_date = models.DateTimeField(
        _(u'Дата создания'),  # 'Creation date'
        auto_now_add=True)
    class Meta:
        verbose_name = _(u'Заметка')  # 'Note'
        verbose_name_plural = _(u'Заметки')  # 'Notes'
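A minimal admin registration for the model above could look like this (illustrative sketch; the project's actual admin.py is not part of this row):

from django.contrib import admin

from .models import Notice


@admin.register(Notice)
class NoticeAdmin(admin.ModelAdmin):
    list_display = ('user', 'picture', 'creation_date')
    list_filter = ('creation_date',)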

truemped/dopplr | dopplr/solr/__init__.py | Python | apache-2.0 | 707 | 0
# vim: set fileencoding=utf-8 :
#
# Copyright (c) 2012 Retresco GmbH
# Copyright (c) 2012 Daniel Truemper <truemped at googlemail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
This is dopplr.
"""

kyvinh/home-assistant | homeassistant/components/history.py | Python | apache-2.0 | 12,100 | 0
"""
Provide pre-made queries on top of the recorder component.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/history/
"""
import asyncio
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import logging
import time
import voluptuous as vol
from homeassistant.const import (
HTTP_BAD_REQUEST, CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_INCLUDE)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'history'
DEPENDENCIES = ['recorder', 'http']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
CONF_EXCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
            vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
}),
CONF_INCLUDE: vol.Schema({
vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
vol.Optional(CONF_DOMAINS, default=[]):
vol.All(cv.ensure_list, [cv.string])
})
}),
}, extra=vol.ALLOW_EXTRA)
SIGNIFICANT_DOMAINS = ('thermostat', 'climate')
IGNORE_DOMAINS = ('zone', 'scene',)
def last_5_states(entity_id):
"""Return the last 5 states for entity_id."""
entity_id = entity_id.lower()
states = recorder.get_model('States')
return recorder.execute(
recorder.query('States').filter(
(states.entity_id == entity_id) &
(states.last_changed == states.last_updated)
).order_by(states.state_id.desc()).limit(5))
def get_significant_states(start_time, end_time=None, entity_id=None,
filters=None):
"""
Return states changes during UTC period start_time - end_time.
Significant states are all states where there is a state change,
as well as all states from certain domains (for instance
thermostat so that we get current temperature in our graphs).
"""
entity_ids = (entity_id.lower(), ) if entity_id is not None else None
states = recorder.get_model('States')
query = recorder.query(states).filter(
(states.domain.in_(SIGNIFICANT_DOMAINS) |
(states.last_changed == states.last_updated)) &
(states.last_updated > start_time))
if filters:
query = filters.apply(query, entity_ids)
if end_time is not None:
query = query.filter(states.last_updated < end_time)
states = (
state for state in recorder.execute(
query.order_by(states.entity_id, states.last_updated))
if (_is_significant(state) and
not state.attributes.get(ATTR_HIDDEN, False)))
return states_to_json(states, start_time, entity_id, filters)
def state_changes_during_period(start_time, end_time=None, entity_id=None):
"""Return states changes during UTC period start_time - end_time."""
states = recorder.get_model('States')
query = recorder.query('States').filter(
(states.last_changed == states.last_updated) &
(states.last_changed > start_time))
if end_time is not None:
query = query.filter(states.last_updated < end_time)
if entity_id is not None:
query = query.filter_by(entity_id=entity_id.lower())
states = recorder.execute(
query.order_by(states.entity_id, states.last_updated))
return states_to_json(states, start_time, entity_id)
def get_states(utc_point_in_time, entity_ids=None, run=None, filters=None):
"""Return the states at a specific point in time."""
if run is None:
run = recorder.run_information(utc_point_in_time)
# History did not run before utc_point_in_time
if run is None:
return []
from sqlalchemy import and_, func
states = recorder.get_model('States')
most_recent_state_ids = recorder.query(
func.max(states.state_id).label('max_state_id')
).filter(
(states.created >= run.start) &
(states.created < utc_point_in_time) &
(~states.domain.in_(IGNORE_DOMAINS)))
if filters:
most_recent_state_ids = filters.apply(most_recent_state_ids,
entity_ids)
most_recent_state_ids = most_recent_state_ids.group_by(
states.entity_id).subquery()
query = recorder.query('States').join(most_recent_state_ids, and_(
states.state_id == most_recent_state_ids.c.max_state_id))
for state in recorder.execute(query):
if not state.attributes.get(ATTR_HIDDEN, False):
yield state
def states_to_json(states, start_time, entity_id, filters=None):
"""Convert SQL results into JSON friendly data structure.
This takes our state list and turns it into a JSON friendly data
structure {'entity_id': [list of states], 'entity_id2': [list of states]}
We also need to go back and create a synthetic zero data point for
each list of states, otherwise our graphs won't start on the Y
axis correctly.
"""
result = defaultdict(list)
entity_ids = [entity_id] if entity_id is not None else None
# Get the states at the start time
for state in get_states(start_time, entity_ids, filters=filters):
state.last_changed = start_time
state.last_updated = start_time
result[state.entity_id].append(state)
# Append all changes to it
for entity_id, group in groupby(states, lambda state: state.entity_id):
result[entity_id].extend(group)
return result
def get_state(utc_point_in_time, entity_id, run=None):
"""Return a state at a specific point in time."""
states = list(get_states(utc_point_in_time, (entity_id,), run))
return states[0] if states else None
# pylint: disable=unused-argument
def setup(hass, config):
"""Setup the history hooks."""
filters = Filters()
exclude = config[DOMAIN].get(CONF_EXCLUDE)
if exclude:
filters.excluded_entities = exclude[CONF_ENTITIES]
filters.excluded_domains = exclude[CONF_DOMAINS]
include = config[DOMAIN].get(CONF_INCLUDE)
if include:
filters.included_entities = include[CONF_ENTITIES]
filters.included_domains = include[CONF_DOMAINS]
hass.http.register_view(Last5StatesView)
hass.http.register_view(HistoryPeriodView(filters))
register_built_in_panel(hass, 'history', 'History', 'mdi:poll-box')
return True
class Last5StatesView(HomeAssistantView):
"""Handle last 5 state view requests."""
url = '/api/history/entity/{entity_id}/recent_states'
name = 'api:history:entity-recent-states'
@asyncio.coroutine
def get(self, request, entity_id):
"""Retrieve last 5 states of entity."""
result = yield from request.app['hass'].loop.run_in_executor(
None, last_5_states, entity_id)
return self.json(result)
class HistoryPeriodView(HomeAssistantView):
"""Handle history period requests."""
url = '/api/history/period'
name = 'api:history:view-period'
extra_urls = ['/api/history/period/{datetime}']
def __init__(self, filters):
"""Initilalize the history period view."""
self.filters = filters
@asyncio.coroutine
def get(self, request, datetime=None):
"""Return history over a period of time."""
timer_start = time.perf_counter()
if datetime:
datetime = dt_util.parse_datetime(datetime)
if datetime is None:
return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)
now = dt_util.utcnow()
one_day = timedelta(days=1)
if datetime:
start_time = dt_util.as_utc(datetime)
else:
start_time = now - one_day
if start_time > now:
return self.json([])
end_time = request.GET.get('end_time')
if end_time:
end_time = dt_util.as_utc(

jschavem/facial-expression-classification | source/classification/cv_subjects_multithreaded_pca.py | Python | mit | 5,651 | 0.003185
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from threading import Thread
import time
from Queue import Queue, Empty
from lib.in_subject_cross_validation import *
import lib.in_subject_cross_validation as libcv
import multiprocessing
from sklearn.decomposition import RandomizedPCA
attribute_counts = {
'koelstra-approach': {
# 0: [3, 4, 28, 32, 33, 41, 62, 70],
0: 22,
1: 25,
2: 18
},
'koelstra-normalized': {
0: 8,
1: 7,
2: 36
},
'au-counts': {
0: 32,
1: 27,
2: 24
},
'au-counts-valence': {
0: 32,
1: 27,
2: 24
},
'au-counts-weighted': {
0: 12,
1: 13,
2: 10
},
'au-counts-avg': {
0: 18
}
}
q = Queue()
# Performs recursive feature elimination until 'attribute count' has been reached
def _eliminate_features(X_test, X_train, attribute_count, y_train):
print "Eliminating features until %d has been reached" % attribute_count
pca = RandomizedPCA(n_components=attribute_count+10).fit(X_train)
X_train = pca.transform(to_float(X_train))
print "Finished pca"
clf = SVC(**SVC_parameters)
rfe = RFE(clf, n_features_to_select=attribute_count, step=0.1)
fit = rfe.fit(X_train, y_train)
print "Finished rfe"
# Reduce the feature matrices to contain just the selected features
X_train = [fit.transform(X) for X in X_train]
X_test = [fit.transform(X) for X in pca.transform(to_float(X_test))]
return X_test, X_train
def _cv_instances(Xs, ys, test_index, train_index, result_pairs, attribute_count):
# print "Cross validating with %d left out" % test_index
Xs_train, Xs_test = flatten(Xs[train_index]), flatten(Xs[test_index])
ys_train, ys_test = flatten(ys[train_index]), flatten(ys[test_index])
if attribute_count is not None:
Xs_test, Xs_train = _eliminate_features(Xs_test, Xs_train, attribute_count, ys_train)
Xs_test = flatten(Xs_test)
Xs_train = flatten(Xs_train)
# clf = SVC(**SVC_parameters)
clf = GaussianNB()
clf.fit(to_float(Xs_train), ys_train)
ys_pred = clf.predict(to_float(Xs_test))
predicted_class = list(ys_pred)
actual_class = ys_test
print "%d, %.3f" % (test_index[0], accuracy_score(actual_class, predicted_class))
# print "Finished cross validation for %d" % test_index
result_pairs.append((actual_class, predicted_class))
def threaded_worker():
while True:
try:
arguments = q.get(False)
_cv_instances(*arguments)
q.task_done()
except Empty:
break
def cross_validate_combined_dataset(Xs, ys, num_attributes=None, threaded=False):
leave_one_out = cross_validation.LeaveOneOut(len(ys))
result_pairs = []
threads = []
for train_index, test_index in leave_one_out:
if threaded:
q.put((Xs, ys, test_index, train_index, result_pairs, num_attributes))
else:
_cv_instances(Xs, ys, test_index, train_index, result_pairs, num_attributes)
if threaded:
for num in range(1, multiprocessing.cpu_count()):
print "Starting thread %d" % num
thread = Thread(target=threaded_worker)
threads.append(thread)
thread.start()
[thread.join() for thread in threads]
actual_classes = [actual for (actual, _) in result_pairs]
predicted_classes = [predicted for (_, predicted) in result_pairs]
return flatten(actual_classes), flatten(predicted_classes)
def flatten(list):
return [item for sublist in list for item in sublist]
def to_float(list):
return [[float(item) for item in sublist] for sublist in list]
def print_report(actual, attr_count, class_id, dataset, predicted):
# Print the performance to the console
conf_matrix = confusion_matrix(actual, predicted, ['low', 'high'])
print ""
print conf_matrix
scores = f1_score(actual, predicted, ['low', 'high'], 'low', average=None)
class_counts = [sum(row) for row in conf_matrix]
average_f1 = np.average(scores, weights=class_counts)
accuracy = accuracy_score(actual, predicted)
print "\nAverage F1 score: %.3f" % average_f1
print "Average accuracy: %.3f" % accuracy
low_ratings = [p for (idx, p) in enumerate(predicted) if actual[idx] == 'low']
high_ratings = [p for (idx, p) in enumerate(predicted) if actual[idx] == 'high']
#print low_ratings
#print high_ratings
print "Low accuracy: %.3f" % (float(low_ratings.count('low')) / len(low_ratings))
print "High accuracy: %.3f" % (float(high_ratings.count('high')) / len(high_ratings))
attr_names = ["valence", "arousal", "control"]
print "%s\tLeave-one-subject-out%s\t%s\t%s\t%.3f\t%.3f" % (
        dataset, '' if (attr_count is None) else '-rfe', attr_names[class_id], time.strftime('%Y-%m-%d'), average_f1,
accuracy)
def main():
dataset = 'koelstra-approach'
class_id = 0
ground_truth_variable_count = 3
attr_count = attribute_counts[dataset][class_id]
# attr_count = None
    # Load our dataset into memory
    Xs, ys = libcv._load_full_dataset(dataset, class_id, ground_truth_variable_count)
# Perform cross-validation on the dataset, using RFE to achieve the target attr_count
actual, predicted = cross_validate_combined_dataset(Xs, ys, attr_count, threaded=False)
print_report(actual, attr_count, class_id, dataset, predicted)
if __name__ == '__main__':
main()
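The feature-elimination step above (PCA followed by RFE, then a classifier) can also be expressed as a scikit-learn Pipeline; a rough equivalent sketch, with made-up component and feature counts, is:

from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

# Illustrative pipeline mirroring _eliminate_features plus classification in one object.
pipeline = Pipeline([
    ('pca', PCA(n_components=30)),
    ('rfe', RFE(SVC(kernel='linear'), n_features_to_select=20, step=0.1)),
    ('clf', SVC()),
])
# usage: pipeline.fit(X_train, y_train); pipeline.predict(X_test)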

fsantini/rasPyCNCController | gcode/GCodeLoader.py | Python | gpl-3.0 | 2,216 | 0.002256
# rasPyCNCController
# Copyright 2016 Francesco Santini <francesco.santini@gmail.com>
#
# This file is part of rasPyCNCController.
#
# rasPyCNCController is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rasPyCNCController is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rasPyCNCController. If not, see <http://www.gnu.org/licenses/>.
from PySide import QtCore
from GCodeAnalyzer import GCodeAnalyzer
import sys
import pycnc_config
class GCodeLoader(QtCore.QThread):
load_finished = QtCore.Signal()
load_error = QtCore.Signal(object)
def __init__(self):
QtCore.QThread.__init__(self)
self.file = None
self.gcode = None
self.times = None
self.bBox = None
self.loaded = False
self.totalTime = 0
self.busy = False
self.g0_feed = pycnc_config.G0_FEED
def run(self):
self.loaded = False
self.gcode = []
self.times = []
self.bBox = None
self.totalTime = 0
self.busy = True
        analyzer = GCodeAnalyzer()
        analyzer.fastf = self.g0_feed
try:
with open(self.file) as f:
for line in f:
                    analyzer.Analyze(line)
self.gcode.append(line)
self.times.append(analyzer.getTravelTime()*60) # time returned is in minutes: convert to seconds
except:
self.busy = False
e = sys.exc_info()[0]
self.load_error.emit("%s" % e)
return
self.busy = False
self.loaded = True
self.totalTime = self.times[-1]
self.bBox = analyzer.getBoundingBox()
self.load_finished.emit()
def load(self, file):
self.file = file
self.start()
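A usage sketch for the loader above, inside a running Qt application (the file path and slot functions here are hypothetical; the real application wires these signals up in its UI code):

def on_loaded():
    print("loaded %d lines, estimated %.1f s" % (len(loader.gcode), loader.totalTime))

def on_error(message):
    print("failed to load G-code: %s" % message)

loader = GCodeLoader()
loader.load_finished.connect(on_loaded)
loader.load_error.connect(on_error)
loader.load("/path/to/job.gcode")  # starts the QThread; results arrive via the signals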

bgris/ODL_bgris | doc/gitwash_dumper.py | Python | gpl-3.0 | 8,036 | 0.000249
#!/usr/bin/env python
''' Checkout gitwash repo into directory and do search replace on name '''
from __future__ import (absolute_import, division, print_function)
import os
from os.path import join as pjoin
import shutil
import sys
import re
import glob
import fnmatch
import tempfile
from subprocess import call
from optparse import OptionParser
verbose = False
def clone_repo(url, branch):
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
cmd = 'git clone %s %s' % (url, tmpdir)
call(cmd, shell=True)
os.chdir(tmpdir)
cmd = 'git checkout %s' % branch
call(cmd, shell=True)
except:
shutil.rmtree(tmpdir)
raise
finally:
os.chdir(cwd)
return tmpdir
def cp_files(in_path, globs, out_path):
try:
os.makedirs(out_path)
except OSError:
pass
out_fnames = []
for in_glob in globs:
in_glob_path = pjoin(in_path, in_glob)
for in_fname in glob.glob(in_glob_path):
out_fname = in_fname.replace(in_path, out_path)
pth, _ = os.path.split(out_fname)
if not os.path.isdir(pth):
os.makedirs(pth)
shutil.copyfile(in_fname, out_fname)
out_fnames.append(out_fname)
return out_fnames
def filename_search_replace(sr_pairs, filename, backup=False):
''' Search and replace for expressions in files
'''
with open(filename, 'rt') as in_fh:
in_txt = in_fh.read(-1)
out_txt = in_txt[:]
for in_exp, out_exp in sr_pairs:
in_exp = re.compile(in_exp)
out_txt = in_exp.sub(out_exp, out_txt)
if in_txt == out_txt:
return False
with open(filename, 'wt') as out_fh:
out_fh.write(out_txt)
if backup:
with open(filename + '.bak', 'wt') as bak_fh:
bak_fh.write(in_txt)
return True
def copy_replace(replace_pairs,
repo_path,
out_path,
cp_globs=('*',),
rep_globs=('*',),
renames=()):
out_fnames = cp_files(repo_path, cp_globs, out_path)
renames = [(re.compile(in_exp), out_exp) for in_exp, out_exp in renames]
fnames = []
for rep_glob in rep_globs:
fnames += fnmatch.filter(out_fnames, rep_glob)
if verbose:
print('\n'.join(fnames))
for fname in fnames:
filename_search_replace(replace_pairs, fname, False)
for in_exp, out_exp in renames:
new_fname, n = in_exp.subn(out_exp, fname)
if n:
os.rename(fname, new_fname)
break
def make_link_targets(proj_name,
user_name,
repo_name,
known_link_fname,
out_link_fname,
url=None,
ml_url=None):
""" Check and make link targets
If url is None or ml_url is None, check if there are links present for
these in `known_link_fname`. If not, raise error. The check is:
Look for a target `proj_name`.
Look for a target `proj_name` + ' mailing list'
Also, look for a target `proj_name` + 'github'. If this exists, don't
write this target into the new file below.
If we are writing any of the url, ml_url, or github address, then write
new file with these links, of form:
.. _`proj_name`
.. _`proj_name`: url
.. _`proj_name` mailing list: url
"""
with open(known_link_fname, 'rt') as link_fh:
link_contents = link_fh.readlines()
have_url = url is not None
have_ml_url = ml_url is not None
have_gh_url = None
for line in link_contents:
if not have_url:
match = re.match(r'..\s+_`%s`:\s+' % proj_name, line)
if match:
have_url = True
if not have_ml_url:
match = re.match(r'..\s+_`%s mailing list`:\s+' % proj_name, line)
if match:
have_ml_url = True
if not have_gh_url:
match = re.match(r'..\s+_`%s github`:\s+' % proj_name, line)
if match:
have_gh_url = True
if not have_url or not have_ml_url:
raise RuntimeError('Need command line or known project '
'and / or mailing list URLs')
lines = []
if url is not None:
lines.append('.. _`%s`: %s\n' % (proj_name, url))
if not have_gh_url:
gh_url = 'http://github.com/%s/%s\n' % (user_name, repo_name)
lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url))
if ml_url is not None:
lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url))
if len(lines) == 0:
# Nothing to do
return
# A neat little header line
lines = ['.. %s\n' % proj_name] + lines
with open(out_link_fname, 'wt') as out_links:
out_links.writelines(lines)
USAGE = ''' <output_directory> <project_name>
If not set with options, the repository name is the same as the <project
name>
If not set with options, the main github user is the same as the
repository name.'''
GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git'
GITWASH_BRANCH = 'master'
def main():
parser = OptionParser()
parser.set_usage(parser.get_usage().strip() + USAGE)
parser.add_option("--repo-name", dest="repo_name",
help="repository name - e.g. nitime",
metavar="REPO_NAME")
parser.add_option("--github-user", dest="main_gh_user",
help="github username for main repo - e.g fperez",
metavar="MAIN_GH_USER")
parser.add_option("--gitwash-url", dest="gitwash_url",
help="URL to gitwash repository - default %s"
% GITWASH_CENTRAL,
default=GITWASH_CENTRAL,
metavar="GITWASH_URL")
parser.add_option("--gitwash-branch", dest="gitwash_branch",
help="branch in gitwash repository - default %s"
% GITWASH_BRANCH,
default=GITWASH_BRANCH,
metavar="GITWASH_BRANCH")
parser.add_option("--source-suffix", dest="source_suffix",
help="suffix of ReST source files - default '.rst'",
default='.rst',
metavar="SOURCE_
|
SUFFIX")
parser.add_option("--project-url", dest="project_url",
help="URL for projec
|
t web pages",
default=None,
metavar="PROJECT_URL")
parser.add_option("--project-ml-url", dest="project_ml_url",
help="URL for project mailing list",
default=None,
metavar="PROJECT_ML_URL")
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit()
out_path, project_name = args
if options.repo_name is None:
options.repo_name = project_name
if options.main_gh_user is None:
options.main_gh_user = options.repo_name
repo_path = clone_repo(options.gitwash_url, options.gitwash_branch)
try:
copy_replace((('PROJECTNAME', project_name),
('REPONAME', options.repo_name),
('MAIN_GH_USER', options.main_gh_user)),
repo_path,
out_path,
cp_globs=(pjoin('gitwash', '*'),),
rep_globs=('*.rst',),
renames=(('\.rst$', options.source_suffix),))
make_link_targets(project_name,
options.main_gh_user,
options.repo_name,
pjoin(out_path, 'gitwash', 'known_projects.inc'),
pjoin(out_path, 'gitwash', 'this_project.inc'),
options.project_url,
options.project_ml_url)
finally:
shutil.rmtree(repo_path)
if __name__ == '__main__':
main()
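For reference, an invocation matching the positional arguments and options defined above might look like this (project, user and URLs are made up):

# python gitwash_dumper.py doc/devel myproject \
#     --repo-name=myproject --github-user=my-gh-org \
#     --project-url=http://myproject.example.org \
#     --project-ml-url=http://mail.example.org/myproject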

socketubs/pyhn | pyhn/poller.py | Python | mit | 568 | 0
# -*- coding: utf-8 -*-
from time import sleep
from threading import Thread
class Poller(Thread):
def __init__(self, gui, delay=5):
if delay < 1:
delay = 1
self.gui = gui
        self.delay = delay
        self.is_running = True
        self.counter = 0
super(Poller, self).__init__()
def run(self):
while self.is_running:
sleep(0.1)
self.counter += 0.1
if self.counter >= self.delay * 60:
self.gui.async_refresher(force=True)
self.counter = 0
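A minimal usage sketch for the poller (gui stands for any object exposing async_refresher, as used in run(); not part of the original file):

poller = Poller(gui, delay=5)  # ask for a refresh roughly every 5 minutes
poller.daemon = True           # let the interpreter exit even if the poller is still alive
poller.start()
# ... later, to stop the polling loop:
poller.is_running = False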

afaheem88/tempest_neutron | tempest/api/image/v2/test_images_tags_negative.py | Python | apache-2.0 | 1,715 | 0
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
@test.attr(type=['negative', 'gate'])
def test_update_tags_for_non_existing_image(self):
# Update tag with non existing image.
tag = data_utils.rand_name('tag-')
non_exist_image = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound, self.client.add_image_tag,
non_exist_image, tag)
@test.attr(type=['negative', 'gate'])
def test_delete_non_existing_tag(self):
# Delete non existing tag.
_, body = self.create_image(container_format='bare',
disk_format='raw',
visibility='private'
)
image_id = body['id']
tag = data_utils.rand_name('non-exist-tag-')
self.addCleanup(self.client.delete_image, image_id)
self.assertRaises(exceptions.NotFound, self.client.delete_image_tag,
image_id, tag)

sivakuna-aap/superdesk-core | superdesk/io/ingest_provider_model.py | Python | agpl-3.0 | 9,799 | 0.002857
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from eve.utils import config
from flask import g, current_app as app
import superdesk
from superdesk import get_resource_service
from superdesk.activity import ACTIVITY_CREATE, ACTIVITY_EVENT, ACTIVITY_UPDATE, notify_and_add_activity, \
ACTIVITY_DELETE
from superdesk.errors import SuperdeskApiError
from superdesk.io import allowed_feeding_services, allowed_feed_parsers
from superdesk.metadata.item import CONTENT_STATE, content_type
from superdesk.notification import push_notification
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.utc import utcnow
from superdesk.utils import required_string
logger = logging.getLogger(__name__)
class IngestProviderResource(Resource):
def __init__(self, endpoint_name, app, service, endpoint_schema=None):
self.schema = {
'name': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'iunique': True
},
'source': required_string,
'feeding_service': {
'type': 'string',
'required': True,
'allowed': allowed_feeding_services
},
'feed_parser': {
'type': 'string',
'nullable': True,
'allowed': allowed_feed_parsers
},
'content_types': {
'type': 'list',
'default': content_type,
'allowed': content_type
},
'content_expiry': {
'type': 'integer',
'default': app.config['INGEST_EXPIRY_MINUTES']
},
'config': {
'type': 'dict'
},
'ingested_count': {
'type': 'integer'
},
'accepted_count': {
'type': 'integer'
},
'token': {
'type': 'dict'
},
'is_closed': {
'type': 'boolean',
'default': False
},
'update_schedule': {
'type': 'dict',
'schema': {
'hours': {'type': 'integer'},
'minutes': {'type': 'integer', 'default': 5},
'seconds': {'type': 'integer'},
}
},
'idle_time': {
'type': 'dict',
'schema': {
'hours': {'type': 'integer'},
'minutes': {'type': 'integer'},
}
},
'last_updated': {'type': 'datetime'},
'last_item_update': {'type': 'datetime'},
'rule_set': Resource.rel('rule_sets', nullable=True),
'notifications': {
'type': 'dict',
'schema': {
'on_update': {'type': 'boolean', 'default': True},
'on_close': {'type': 'boolean', 'default': True},
'on_open': {'type': 'boolean', 'default': True},
'on_error': {'type': 'boolean', 'default': True}
}
},
'routing_scheme': Resource.rel('routing_schemes', nullable=True),
'last_closed': {
'type': 'dict',
'schema': {
'closed_at': {'type': 'datetime'},
'closed_by': Resource.rel('users', nullable=True),
'message': {'type': 'string'}
}
},
'last_opened': {
'type': 'dict',
'schema': {
'opened_at': {'type': 'datetime'},
'opened_by': Resource.rel('users', nullable=True)
}
},
'critical_errors': {
'type': 'dict',
'valueschema': {
'type': 'boolean'
}
},
}
self.item_methods = ['GET', 'PATCH', 'DELETE']
self.privileges = {'POST': 'ingest_providers', 'PATCH': 'ingest_providers', 'DELETE': 'ingest_providers'}
self.etag_ignore_fields = ['last_updated', 'last_item_update', 'last_closed', 'last_opened']
super().__init__(endpoint_name, app, service, endpoint_schema=endpoint_schema)
class IngestProviderService(BaseService):
def __init__(self, datasource=None, backend=None):
super().__init__(datasource=datasource, backend=backend)
self.user_service = get_resource_service('users')
def _set_provider_status(self, doc, message=''):
user = getattr(g, 'user', None)
if doc.get('is_closed', True):
doc['last_closed'] = doc.get('last_closed', {})
doc['last_closed']['closed_at'] = utcnow()
doc['last_closed']['closed_by'] = user['_id'] if user else None
doc['last_closed']['message'] = message
else:
doc['last_opened'] = doc.get('last_opened', {})
doc['last_opened']['opened_at'] = utcnow()
doc['last_opened']['opened_by'] = user['_id'] if user else None
def on_create(self, docs):
for doc in docs:
if doc.get('content_expiry', 0) == 0:
doc['content_expiry'] = app.config['INGEST_EXPIRY_MINUTES']
self._set_provider_status(doc, doc.get('last_closed', {}).get('message', ''))
def on_created(self, docs):
for doc in docs:
notify_and_add_activity(ACTIVITY_CREATE, 'Created Ingest Channel {{name}}',
self.datasource, item=None,
user_list=self.user_service.get_users_by_user_type('administrator'),
name=doc.get('name'), provider_id=doc.get('_id'))
push_notification('ingest_provider:create', provider_id=str(doc.get('_id')))
        logger.info("Created Ingest Channel. Data:{}".format(docs))
def on_update(self, updates, original):
if updates.get('content_expiry') == 0:
            updates['content_expiry'] = app.config['INGEST_EXPIRY_MINUTES']
if 'is_closed' in updates and original.get('is_closed', False) != updates.get('is_closed'):
self._set_provider_status(updates, updates.get('last_closed', {}).get('message', ''))
def on_updated(self, updates, original):
do_notification = updates.get('notifications', {})\
.get('on_update', original.get('notifications', {}).get('on_update', True))
notify_and_add_activity(ACTIVITY_UPDATE, 'updated Ingest Channel {{name}}',
self.datasource, item=None,
user_list=self.user_service.get_users_by_user_type('administrator')
if do_notification else None,
name=updates.get('name', original.get('name')),
provider_id=original.get('_id'))
if updates.get('is_closed', False) != original.get('is_closed', False):
status = ''
do_notification = False
if updates.get('is_closed'):
status = 'closed'
do_notification = updates.get('notifications', {}). \
get('on_close', original.get('notifications', {}).get('on_close', True))
elif not updates.get('is_closed'):
status = 'opened'
do_notification = updates.get('notifications', {}). \
get('on_open', original.get('notifications', {}).get('on_open', True))
notify_and_add_activity(ACTIVITY_EVENT, '{{status}} Ingest Channel {{name}}',
self.datasource, item=None,
user_list=self.user_serv

agiliq/django-graphos | graphos/tests.py | Python | bsd-2-clause | 38,867 | 0.002135
from django.test import TestCase
from pymongo.errors import CollectionInvalid
from .sources.base import BaseDataSource
from .sources.simple import SimpleDataSource
from .sources.csv_file import CSVDataSource
from .sources.model import ModelDataSource
from .sources.mongo import MongoDBDataSource
from .renderers import base, flot, gchart, yui, matplotlib_renderer, highcharts
from .exceptions import GraphosException
from .utils import DEFAULT_HEIGHT, DEFAULT_WIDTH, get_default_options, get_db
from demo.models import Account
import os
import json
current_path = os.path.dirname(os.path.abspath(__file__))
class TestSources(TestCase):
def test_base_data_source(self):
data_source = BaseDataSource()
self.assertTrue(hasattr(data_source, "get_data"))
self.assertRaises(GraphosException, data_source.get_data,)
self.assertTrue(hasattr(data_source, "get_header"))
self.assertRaises(GraphosException, data_source.get_header)
self.assertTrue(hasattr(data_source, "get_first_column"))
self.assertRaises(GraphosException, data_source.get_first_column)
def test_simple_data_source(self):
data = [
['Year', 'Sales', 'Expenses'],
['2004', 1000, 400],
['2005', 1170, 460],
['2006', 660, 1120],
['2007', 1030, 540]
]
data_source = SimpleDataSource(data)
self.assertEqual(data_source.get_data(), data)
self.assertEqual(data_source.get_header(),
['Year', 'Sales', 'Expenses'])
self.assertEqual(data_source.get_first_column(),
['2004', '2005', '2006', '2007'])
def test_csv_data_source(self):
data = [
['Year', 'Sales', 'Expense'],
['2006', '1000', '400'],
['2007', '1170', '460'],
['2008', '660', '1120'],
['2009', '1030', '540']
]
csv_file = open(os.path.join(current_path, "test_data/accounts.csv"),
"r")
data_source = CSVDataSource(csv_file)
self.assertEqual(data, data_source.get_data())
self.assertEqual(data_source.get_header(),
['Year', 'Sales', 'Expense'])
self.assertEqual(data_source.get_first_column(),
['2006', '2007', '2008', '2009'])
def test_model_data_source(self):
data = [
['year', 'sales', 'expenses'],
[u'2004', 1000, 400],
[u'2005', 1170, 460],
[u'2006', 660, 1120],
[u'2007', 1030, 540]
]
#Create some rows
Account.objects.create(year="2004", sales=1000,
expenses=400, ceo="Welch")
Account.objects.create(year="2005", sales=1170,
expenses=460, ceo="Jobs")
Account.objects.create(year="2006", sales=660,
expenses=1120, ceo="Page")
Account.objects.create(year="2007", sales=1030,
expenses=540, ceo="Welch")
query_set = Account.objects.all()
data_source = ModelDataSource(query_set, ['year', 'sales', 'expenses'])
self.assertEqual(data, data_source.get_data())
self.assertEqual(data_source.get_header(),
['year', 'sales', 'expenses'])
self.assertEqual(data_source.get_first_column(),
['2004', '2005', '2006', '2007'])
def get_mongodb_test_db(db_name, collection_name):
cur_dir = os.path.dirname(os.path.realpath(__file__))
test_data_file = open(cur_dir + '/test_data/mongodb/test_zips.json')
db = get_db(db_name)
try:
db.create_collection(collection_name)
except CollectionInvalid:
pass
for line in test_data_file:
doc = json.loads(line)
db[collection_name].save(doc)
test_data_file.close()
return db
class TestMongoDBSource(TestCase):
def setUp(self):
db_name = "test_db"
collection_name = "zips"
self.db = get_mongodb_test_db(db_name, collection_name)
self.collection = self.db[collection_name]
self.cursor = self.collection.find()
self.fields = ['_id', 'pop']
self.data = [['_id', 'pop'], ['35004', 6055], ['35005', 10616],
['35006', 3205], ['35007', 14218], ['35010', 19942],
['35014', 3062], ['35016', 13650], ['35019', 1781],
['35020', 40549], ['35023', 39677], ['35031', 9058],
['35033', 3448], ['35034', 3791], ['35035', 1282],
['35040', 4675], ['35042', 4902], ['35043', 4781],
['35044', 7985], ['35045', 13990], ['35049', '']]
self.data_source = MongoDBDataSource(cursor=self.cursor,
fields=self.fields)
def test_data_source(self):
self.assertTrue(hasattr(self.data_source, 'get_data'))
self.assertTrue(hasattr(self.data_source, 'get_header'))
self.assertTrue(hasattr(self.data_source, 'get_first_column'))
self.assertEqual(self.data, self.data_source.get_data())
self.assertEqual(self.fields, self.data_source.get_header())
self.assertEqual(
[el[0] for el in self.data[1:]],
self.data_source.get_first_column()
)
def tearDown(self):
self.db.drop_collection(self.collection.name)
class TestBaseRenderer(TestCase):
def setUp(self):
data = [
['Year', 'Sales', 'Expenses'],
[2004, 1000, 400],
[2005, 1170, 460],
[2006, 660, 1120],
[2007, 1030, 540]
]
self.options = {"title": "Sales and Expences Graph"}
self.default_options = {'title': 'Chart'}
self.empty_options = {}
self.data_source = SimpleDataSource(data)
self.data = data
self.html_id = 'base_chart'
self.template = 'graphos/as_html.html'
self.header = data[0]
def test_base_chart(self):
chart = base.BaseChart(data_source=self.data_source,
options=self.options,
html_id=self.html_id)
empty_options_chart = base.BaseChart(data_source=self.data_source,
options=self.empty_options)
self.assertTrue(hasattr(chart, "width"))
self.assertEqual(DEFAULT_WIDTH, chart.width)
self.assertTrue(hasattr(chart, "height"))
self.assertEqual(DEFAULT_HEIGHT, chart.height)
self.assertTrue(hasattr(chart, "header"))
self.assertEqual(self.header, chart.header)
self.assertTrue(hasattr(chart, "get_data"))
self.assertEqual(self.data, chart.get_data())
self.assertTrue(hasattr(chart, "get_data_json"))
self.assertEqual(json.dumps(self.data), chart.get_data_json())
self.assertTrue(hasattr(chart, "get_options"))
self.assertEqual(self.options, chart.get_options())
self.assertEqual(self.default_options,
empty_options_chart.get_options())
self.assertTrue(hasattr(chart, "get_options_json"))
self.assertEqual(json.dumps(self.options),
chart.get_options_json())
self.assertTrue(hasattr(chart, "get_template"))
self.assertEqual(self.template, chart.get_template())
        self.assertTrue(hasattr(chart, "get_html_template"))
self.assertRaises(GraphosException, chart.get_html_template)
        self.assertTrue(hasattr(chart, "get_js_template"))
self.assertRaises(GraphosException, chart.get_js_template)
self.assertTrue(hasattr(chart, "get_html_id"))
self.assertTrue(self.html_id, chart.get_html_id())
self.assertTrue(hasattr(chart, "as_html"))
self.assertRaises(GraphosException, chart.as_html)
def test_options(self):
"""
Assert that options get set to a dictionary in case no options is passed during initialization
"""
chart = base.BaseChart(data_source=self.data_source)
self.assertEqual(self.default_options
|
dejlek/pulsar
|
pulsar/apps/rpc/jsonrpc.py
|
Python
|
bsd-3-clause
| 11,409
| 0
|
import sys
import json
import logging
import asyncio
from collections import namedtuple
from pulsar import AsyncObject, as_coroutine, new_event_loop, ensure_future
from pulsar.utils.string import gen_unique_id
from pulsar.utils.tools import checkarity
from pulsar.apps.wsgi import Json
from pulsar.apps.http import HttpClient
from .handlers import RpcHandler, InvalidRequest, exception
__all__ = ['JSONRPC', 'JsonProxy', 'JsonBatchProxy']
logger = logging.getLogger('pulsar.jsonrpc')
BatchResponse = namedtuple('BatchResponse', 'id result exception')
class JSONRPC(RpcHandler):
'''An :class:`.RpcHandler` for JSON-RPC services.
    Designed to comply with the `JSON-RPC 2.0`_ Specification.
JSON-RPC is a lightweight remote procedure call protocol
designed to be simple.
A remote method is invoked by sending a request to a remote service,
the request is a single object serialised using JSON.
.. _`JSON-RPC 2.0`: http://www.jsonrpc.org/specification
'''
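    # Illustrative wire format (example values only, not from this file), per the
    # JSON-RPC 2.0 spec, for a hypothetical "subtract" method:
    #   request:  {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": 1}
    #   response: {"jsonrpc": "2.0", "result": 19, "id": 1}
    #   error:    {"jsonrpc": "2.0", "error": {"code": -32603, "message": "..."}, "id": 1}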
version = '2.0'
def __call__(self, request):
return ensure_future(self._execute_request(request))
@asyncio.coroutine
def _execute_request(self, request):
response = request.response
try:
data = yield from as_coroutine(request.body_data())
except ValueError:
res, status = self._get_error_and_status(InvalidRequest(
status=415, msg='Content-Type must be application/json'))
else:
# if it's batch request
if isinstance(data, list):
status = 200
tasks = [self._call(request, each) for each in data]
result = yield from asyncio.gather(*tasks)
res = [r[0] for r in result]
else:
res, status = yield from self._call(request, data)
response.status_code = status
return Json(res).http_response(request)
@asyncio.coroutine
def _call(self, request, data):
exc_info = None
proc = None
try:
if (not isinstance(data, dict) or
data.get('jsonrpc') != self.version or
'id' not in data):
raise InvalidRequest(
'jsonrpc must be supplied and equal to "%s"' %
self.version
)
params = data.get('params')
if isinstance(params, dict):
args, kwargs = (), params
else:
args, kwargs = tuple(params or ()), {}
#
proc = self.get_handler(data.get('method'))
result = yield from as_coroutine(proc(request, *args, **kwargs))
except Exception as exc:
result = exc
exc_info = sys.exc_info()
else:
try:
json.dumps(result)
except Exception as exc:
result = exc
exc_info = sys.exc_info()
#
if exc_info:
            if isinstance(result, TypeError) and proc:
msg = checkarity(proc, args, kwargs, discount=1)
else:
msg = None
rpc_id = data.get('id') if isinstance(data, dict) else None
res, status = self._get_error_and_status(
                result, msg=msg, rpc_id=rpc_id, exc_info=exc_info)
else:
res = {
'id': data.get('id'),
'jsonrpc': self.version,
'result': result
}
status = 200
return res, status
def _get_error_and_status(self, exc, msg=None, rpc_id=None,
exc_info=None):
res = {'id': rpc_id, 'jsonrpc': self.version}
code = getattr(exc, 'fault_code', None)
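        # -32602 and -32603 are the standard JSON-RPC 2.0 error codes for
        # "Invalid params" and "Internal error" respectively.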
if not code:
code = -32602 if msg else -32603
msg = msg or str(exc) or 'JSON RPC exception'
if code == -32603:
logger.error(msg, exc_info=exc_info)
else:
logger.warning(msg)
res['error'] = {
'code': code,
'message': msg,
'data': getattr(exc, 'data', '')
}
return res, getattr(exc, 'status', 400)
class JsonCall:
    __slots__ = ('_client', '_name')
def __init__(self, client, name):
self._client = client
self._name = name
def __repr__(self):
return self._name
__str__ = __repr__
@property
def url(self):
return self._client.url
@property
def name(self):
return self._name
def __getattr__(self, name):
name = "%s%s%s" % (self._name, self._client.separator, name)
return self.__class__(self._client, name)
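    # e.g. proxy.calculator.add resolves to a JsonCall named "calculator.add"
    # (built with the client's '.' separator); that string becomes the JSON-RPC
    # "method" field when the call is finally invoked.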
def __call__(self, *args, **kwargs):
result = self._client._call(self._name, *args, **kwargs)
if self._client.sync:
return self._client._loop.run_until_complete(result)
else:
return result
class JsonProxy(AsyncObject):
'''A python Proxy class for :class:`.JSONRPC` Servers.
:param url: server location
:param version: JSON-RPC server version. Default ``2.0``
:param id: optional request id, generated if not provided.
Default ``None``.
:param data: Extra data to include in all requests. Default ``None``.
:param full_response: return the full Http response rather than
just the content.
:param http: optional http client. If provided it must have the ``request``
method available which must be of the form::
http.request(url, body=..., method=...)
Default ``None``.
    Let's say your RPC server is running at ``http://domain.name.com/``::
>>> a = JsonProxy('http://domain.name.com/')
>>> a.add(3,4)
7
>>> a.ping()
'pong'
'''
separator = '.'
default_version = '2.0'
default_timeout = 30
def __init__(self, url, version=None, data=None,
full_response=False, http=None, timeout=None, sync=False,
loop=None, **kw):
self.sync = sync
self._url = url
self._version = version or self.__class__.default_version
self._full_response = full_response
self._data = data if data is not None else {}
if not http:
timeout = timeout if timeout is not None else self.default_timeout
if sync and not loop:
loop = new_event_loop()
http = HttpClient(timeout=timeout, loop=loop, **kw)
http.headers['accept'] = 'application/json, text/*; q=0.5'
http.headers['content-type'] = 'application/json'
self._http = http
@property
def url(self):
return self._url
@property
def version(self):
return self._version
@property
def _loop(self):
return self._http._loop
def makeid(self):
'''Can be re-implemented by your own Proxy'''
return gen_unique_id()
def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self._url)
def __str__(self):
return self.__repr__()
def __getattr__(self, name):
return JsonCall(self, name)
def _call(self, name, *args, **kwargs):
data = self._get_data(name, *args, **kwargs)
body = json.dumps(data).encode('utf-8')
resp = yield from self._http.post(self._url, data=body)
if self._full_response:
return resp
else:
content = resp.decode_content()
if resp.is_error:
if 'error' not in content:
resp.raise_for_status()
return self.loads(content)
def _get_data(self, func_name, *args, **kwargs):
id = self.makeid()
params = self.get_params(*args, **kwargs)
data = {'method': func_name, 'params': params, 'id': id,
'jsonrpc': self._version}
return data
def get_params(self, *args, **kwargs):
'''
        Create an array of positional or named parameters.
Mixing positional and named parameters in one
call is not possible.
'''
kwargs.update(self._data)
if args and kwargs:
raise
|
patricklaw/pants
|
src/python/pants/backend/python/goals/repl.py
|
Python
|
apache-2.0
| 5,683
| 0.002815
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.python.subsystems.ipython import IPython
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex import Pex, PexRequest
from pants.backend.python.util_rules.pex_environment import PexEnvironment
from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
from pants.backend.python.util_rules.python_sources import (
PythonSourceFiles,
PythonSourceFilesRequest,
)
from pants.core.goals.repl import ReplImplementation, ReplRequest
from pants.engine.addresses import Addresses
from pants.engine.fs import Digest, MergeDigests
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
class PythonRepl(ReplImplementation):
name = "python"
@rule(level=LogLevel.DEBUG)
async def create_python_repl_request(repl: PythonRepl, pex_env: PexEnvironment) -> ReplRequest:
# Note that we get an intermediate PexRequest here (instead of going straight to a Pex) so
# that we can get the interpreter constraints for use in local_dists_request.
requirements_pex_request = await Get(
PexRequest,
PexFromTargetsRequest,
PexFromTargetsRequest.for_requirements(
(tgt.address for tgt in repl.targets), internal_only=True
),
)
requirements_request = Get(Pex, PexRequest, requirements_pex_request)
local_dists_request = Get(
LocalDistsPex,
LocalDistsPexRequest(
Addresses(tgt.address for tgt in repl.targets),
interpreter_constraints=requirements_pex_request.interpreter_constraints,
),
)
sources_request = Get(
PythonSourceFiles, PythonSourceFilesRequest(repl.targets, include_files=True)
)
requirements_pex, local_dists, sources = await MultiGet(
requirements_request, local_dists_request, sources_request
)
merged_digest = await Get(
Digest,
MergeDigests(
(requirements_pex.digest, local_dists.pex.digest, sources.source_files.snapshot.digest)
),
)
complete_pex_env = pex_env.in_workspace()
args = complete_pex_env.create_argv(
repl.in_chroot(requirements_pex.name), python=requirements_pex.python
)
chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
extra_env = {
**complete_pex_env.environment_dict(python_configured=requirements_pex.python is not None),
"PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
"PEX_PATH": repl.in_chroot(local_dists.pex.name),
}
return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)
class IPythonRepl(ReplImplementation):
name = "ipython"
@rule(level=LogLevel.DEBUG)
async def create_ipython_repl_request(
repl: IPythonRepl, ipython: IPython, pex_env: PexEnvironment
) -> ReplRequest:
# Note that we get an intermediate PexRequest here (instead of going straight to a Pex) so
# that we can get the interpreter constraints for use in ipython_request/local_dists_request.
requirements_pex_request = await Get(
PexRequest,
PexFromTargetsRequest,
PexFromTargetsRequest.for_requirements(
(tgt.address for tgt in repl.targets), internal_only=True
),
)
requirements_request = Get(Pex, PexRequest, requirements_pex_request)
sources_request = Get(
PythonSourceFiles, PythonSourceFilesRequest(repl.targets, include_files=True)
)
ipython_request = Get(
Pex,
PexRequest(
output_filename="ipython.pex",
main=ipython.main,
requirements=ipython.pex_requirements(),
interpreter_constraints=requirements_pex_request.interpreter_constraints,
internal_only=True,
),
)
requirements_pex, sources, ipython_pex = await MultiGet(
requirements_request, sources_request, ipython_request
)
local_dists = await Get(
LocalDistsPex,
LocalDistsPexRequest(
[tgt.address for tgt in repl.targets],
interpreter_constraints=requirements_pex_request.interpreter_constraints,
sources=sources,
),
)
merged_digest = await Get(
Digest,
MergeDigests(
(
requirements_pex.digest,
local_dists.pex.digest,
local_dists.remaining_sources.source_files.snapshot.digest,
ipython_pex.digest,
)
),
)
complete_pex_env = pex_env.in_workspace()
args = list(
complete_pex_env.create_argv(repl.in_chroot(ipython_pex.name), python=ipython_pex.python)
)
if ipython.options.ignore_cwd:
args.append("--ignore-cwd")
chrooted_source_roots = [repl.in_chroot(sr) for sr in sources.source_roots]
extra_env = {
**complete_pex_env.environment_dict(python_configured=ipython_pex.python is not None),
"PEX_PATH": os.pathsep.join(
[
repl.in_chroot(requirements_pex_request.output_filename),
repl.in_chroot(local_dists.pex.name),
]
),
"PEX_EXTRA_SYS_PATH": os.pathsep.join(chrooted_source_roots),
}
return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)
def rules():
return [
*collect_rules(),
UnionRule(ReplImplementation, PythonRepl),
UnionRule(ReplImplementation, IPythonRepl),
]
|
naturalness/partycrasher
|
lp/bucketizer.py
|
Python
|
gpl-3.0
| 4,063
| 0.004922
|
#!/usr/bin/env python
# Copyright 2015 Joshua Charles Campbell
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import MySQLdb
from sys import argv
import re, os, shutil, errno
def f1(seq):
# not order preserving
set = {}
map(set.__setitem__, seq, [])
return set.keys()
def main():
db = MySQLdb.connect("localhost", "bicho", argv[1], "bicho")
bugkets = dict()
bugs_to_bugkets = dict()
next_bugketid = 1
new_bugket = True
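    # Repeatedly scan every issue, merging bug ids linked by duplicate_of or
    # duplicates_list into a shared "bugket"; keep looping until a full pass
    # creates no new bucket (a simple fixed-point over the duplicate links).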
while new_bugket:
new_bugket = False
cursor = db.cursor()
cursor.execute("SELECT lp_id, duplicate_of, duplicates_list FROM issues_ext_launchpad NATURAL JOIN issues;")
for row in cursor:
#print repr(row)
lp_ids = [row[0]]
if row[1] is not None:
lp_ids.append(row[1])
if row[2] is not None:
lp_ids.extend([int(bugid) for bugid in row[2].split()])
destination_bugkets = set()
for bugid in lp_ids:
if bugid in bugs_to_bugkets:
destination_bugkets.add(bugs_to_bugkets[bugid])
if len(destination_bugkets)>0:
bugket_id = next(iter(destination_bugkets))
if len(destination_bugkets)>1:
print repr(destination_bugkets)
new_bugket = True
else:
bugket_id = next_bugketid
next_bugketid = next_bugketid + 1
new_bugket = True
print "New: %i" % (bugket_id)
for bugid in lp_ids:
bugs_to_bugkets[bugid] = bugket_id
for k, v in bugs_to_bugkets.iteritems():
bugkets[v] = bugkets.get(v, [])
bugkets[v].append(k)
cursor = db.cursor()
cursor.execute("SELECT name, url, lp_id, issues.description, attachments.submitted_on FROM issues_ext_launchpad INNER JOIN issues INNER JOIN attachments ON (attachments.issue_id = issues.id AND issues_ext_launchpad.issue_id = issues.id);")
for row in cursor:
name, url, bugid, description, submitted_on = row
if re.search('Stacktrace', name, flags=re.IGNORECASE) is None:
continue
bugket_id = bugs_to_bugkets[bugid]
str_bugid = "%010i" % (bugid)
str_bugketid = "%09i" % (bugket_id)
path = "bugkets/" + str_bugketid + "/" + str_bugid
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
i = 0
filepath = path + "/" + name
bugpath = re.sub('https://', '', url)
while os.path.isfile(filepath):
i = i+1
filepath = path + "/" + name + "." + str(i)
try:
shutil.copyfile(bugpath, filepath)
print filepath
except IOError as exc: # Python >2.5
if exc.errno == errno.ENOENT and os.path.isdir(path):
pass
else: raise
postpath = path + "/Post.txt"
if not os.path.isfile(postpath): # we get this multiple times due to our join denormalizing
with open(postpath, "w") as post_file:
post_file.write(description)
post_file.write("\nDate: " + submitted_on.isoformat())
print postpath
if __name__ == "__main__":
main()
|
VolosHack/Python-presentation
|
workshop/list_comprehensions.py
|
Python
|
mit
| 1,322
| 0.003782
|
# List-Set-Dict comprehension exercises
# - - - - - - - - - - - - - - - - -
# Write a comprehension whose value is the first 10 powers of 2
def bin_power():
# write code here #
return
# Write a list comprehension whose value is the cartesian product
# of two lists A, B.
# Example: A = [1,2,3], B = [4,5,6]:
# A*B = [ (1,4), (1,5), (1,6), (2,4), (2,5), (2,6), (3,4), (3,5), (3,6) ]
def cartesian_product(A, B):
# write code here #
return
# Write a function which takes two lists A,B and creates using comprehension
# a dictionary whose keys are the items of list A and whose values
# are the items of list B. ( Implement zip built-in function )
# Example: A = ['A','B','C'], B = [ 5.0, 7.5, 3.14 ]
# returns: { 'A':5.0, 'B':7.5, 'C':3.14 }
def zip_lists(A, B):
# write code here #
return
# Write a comprehension whose value is the set consisting
# of the odd numbers up to 100
def odd_list():
# write code here #
return
# Write a function which takes a list of positive numbers and returns
# a list that consists of: twice that number if it is divisible by 3,
# the square of that number if it is not.
# Example: A = [3, 2, 10, 7, 9, 5 ]
# returns: [6, 4, 100, 49, 18, 25]
def filter_list(A):
# write code here #
return
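# Possible solutions for reference (one of several valid answers for each;
# the stubs above are intentionally left blank for the workshop):
# bin_power: [2 ** i for i in range(10)]
# cartesian_product: [(a, b) for a in A for b in B]
# zip_lists: {A[i]: B[i] for i in range(len(A))}
# odd_list: {n for n in range(1, 101) if n % 2 == 1}
# filter_list: [2 * n if n % 3 == 0 else n ** 2 for n in A]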
|
nicolasfauchereau/paleopy
|
paleopy/plotting/vector_plot.py
|
Python
|
mit
| 1,697
| 0.0165
|
import numpy as np
from numpy import ma
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap as bm
from mpl_toolkits.basemap import addcyclic
import palettable
class vector_plot:
def __init__(self, ucompos, vcompos):
self.ucompos = ucompos
self.vcompos = vcompos
self.uanoms = self.ucompos.dset['composite_anomalies']
self.vanoms = self.vcompos.dset['composite_anomalies']
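        # wind speed anomaly magnitude sqrt(u**2 + v**2) of the composite anomalies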
self.windspeed = np.sqrt(np.power(self.uanoms, 2) + np.power(self.vanoms, 2))
def plotmap(self, domain = [0., 360., -90., 90.], res='c', stepp=2, scale=20):
latitudes = self.windspeed.latitudes.data
longitudes = self.windspeed.longitudes.data
m = bm(projection='cyl',llcrnrlat=latitudes.min(),urcrnrlat=latitudes.max(),\
llcrnrlon=longitudes.min(),urcrnrlon=longitudes.max(),\
lat_ts=0, resolution=res)
lons, lats = np.meshgrid(longitudes, latitudes)
        cmap = palettable.colorbrewer.sequential.Oranges_9.mpl_colormap
f, ax = plt.subplots(figsize=(10,6))
m.ax = ax
x, y = m(lons, lats)
im = m.pcolormesh(lons, lats, self.windspeed.data, cmap=cmap)
cb = m.colorbar(im)
cb.set_label('wind speed (m/s)', fontsize=14)
Q = m.quiver(x[::stepp,::stepp], y[::stepp,::stepp], \
self.uanoms.data[::stepp,::stepp], self.vanoms.data[::stepp,::stepp], \
                     pivot='middle', scale=scale)
l,b,w,h = ax.get_position().bounds
qk = plt.quiverkey(Q, l+w-0.1, b-0.03, 5, "5 m/s", labelpos='E', fontproperties={'size':14}, coordinates='figure')
m.drawcoastlines()
return f
|
MaterialsDiscovery/PyChemia
|
pychemia/code/vasp/vaspxml.py
|
Python
|
mit
| 19,297
| 0.00228
|
"""
Created on April 25 2020
@author: Pedram Tavadze
"""
import os
import numpy as np
from .xml_output import parse_vasprun
from .incar import VaspInput
from ..codes import CodeOutput
from ...core import Structure
from ...visual import DensityOfStates
from ...crystal.kpoints import KPoints
class VaspXML(CodeOutput):
def __init__(self, filename='vasprun.xml'):
CodeOutput.__init__(self)
if not os.path.isfile(filename):
raise ValueError('File not found ' + filename)
else:
self.filename = filename
self.spins_dict = {'spin 1': 'Spin-up', 'spin 2': 'Spin-down'}
        # self.positions = None
# self.stress = None
#self.array_sizes = {}
self.data = self.read()
if self.has_diverged:
return
self.bands = self._get_bands()
self.bands_projected = self._get_bands_projected()
def read(self):
return parse_vasprun(self.filename)
def _get_dos_total(self):
        spins = list(self.data['general']['dos']
['total']['array']['data'].keys())
energies = np.array(
self.data['general']['dos']['total']['array']['data'][spins[0]])[:, 0]
dos_total = {'energies': energies}
for ispin in spins:
dos_total[self.spins_dict[ispin]] = np.array(
self.data['general']['dos']['total']['array']['data'][ispin])[:, 1]
return dos_total, list(dos_total.keys())
def _get_dos_projected(self, atoms=[]):
if len(atoms) == 0:
atoms = np.arange(self.initial_structure.natom)
if 'partial' in self.data['general']['dos']:
dos_projected = {}
            # using this name as vasprun.xml uses ion #
ion_list = ["ion %s" % str(x + 1) for x in atoms]
for i in range(len(ion_list)):
iatom = ion_list[i]
name = self.initial_structure.symbols[atoms[i]] + str(atoms[i])
spins = list(
self.data['general']['dos']['partial']['array']['data'][iatom].keys())
energies = np.array(
self.data['general']['dos']['partial']['array']['data'][iatom][spins[0]][spins[0]])[:, 0]
dos_projected[name] = {'energies': energies}
for ispin in spins:
dos_projected[name][self.spins_dict[ispin]] = np.array(
self.data['general']['dos']['partial']['array']['data'][iatom][ispin][ispin])[:, 1:]
return dos_projected, self.data['general']['dos']['partial']['array']['info']
else:
print("This calculation does not include partial density of states")
return None, None
def _get_bands(self):
spins = list(self.data["general"]["eigenvalues"]
["array"]["data"].keys())
kpoints_list = list(
self.data["general"]["eigenvalues"]["array"]["data"]["spin 1"].keys())
eigen_values = {}
nbands = len(
self.data["general"]["eigenvalues"]["array"]["data"][spins[0]][
kpoints_list[0]
][kpoints_list[0]]
)
nkpoints = len(kpoints_list)
for ispin in spins:
eigen_values[ispin] = {}
eigen_values[ispin]["eigen_values"] = np.zeros(
shape=(nbands, nkpoints))
eigen_values[ispin]["occupancies"] = np.zeros(
shape=(nbands, nkpoints))
for ikpoint, kpt in enumerate(kpoints_list):
temp = np.array(
self.data["general"]["eigenvalues"]["array"]["data"][ispin][kpt][kpt])
eigen_values[ispin]["eigen_values"][:, ikpoint] = (
temp[:, 0] - self.fermi
)
eigen_values[ispin]["occupancies"][:, ikpoint] = temp[:, 1]
return eigen_values
def _get_bands_projected(self):
# projected[iatom][ikpoint][iband][iprincipal][iorbital][ispin]
labels = self.data["general"]["projected"]["array"]["info"]
spins = list(self.data["general"]["projected"]["array"]["data"].keys())
kpoints_list = list(
self.data["general"]["projected"]["array"]["data"][spins[0]].keys()
)
bands_list = list(
self.data["general"]["projected"]["array"]["data"][spins[0]][
kpoints_list[0]
][kpoints_list[0]].keys()
)
bands_projected = {"labels": labels}
nspins = len(spins)
nkpoints = len(kpoints_list)
nbands = len(bands_list)
norbitals = len(labels)
natoms = self.initial_structure.natom
bands_projected["projection"] = np.zeros(
shape=(nspins, nkpoints, nbands, natoms, norbitals)
)
for ispin, spn in enumerate(spins):
for ikpoint, kpt in enumerate(kpoints_list):
for iband, bnd in enumerate(bands_list):
bands_projected["projection"][
ispin, ikpoint, iband, :, :
] = np.array(
self.data["general"]["projected"]["array"]["data"][spn][kpt][
kpt
][bnd][bnd]
)
# # ispin, ikpoint, iband, iatom, iorbital
# bands_projected["projection"] = np.swapaxes(
# bands_projected["projection"], 0, 3)
# # iatom, ikpoint, iband, ispin, iorbital
# bands_projected["projection"] = np.swapaxes(
# bands_projected["projection"], 3, 4)
# # iatom, ikpoint, iband, iorbital, ispin
# bands_projected["projection"] = bands_projected["projection"].reshape(
# natoms, nkpoints, nbands, norbitals, nspins
# )
return bands_projected
@property
def dos_to_dict(self):
"""
Returns the complete density (total,projected) of states as a python dictionary
"""
return {'total': self._get_dos_total(),
'projected': self._get_dos_projected()}
@property
def dos_total(self):
"""
        Returns the total density of states as a pychemia.visual.DensityOfStates object
"""
dos_total, labels = self._get_dos_total()
dos_total['energies'] -= self.fermi
return DensityOfStates(
np.array(
[
dos_total[x] for x in dos_total]).T,
title='Total Density Of States',
labels=[
x.capitalize() for x in labels])
@property
def dos_projected(self):
"""
        Returns a list of projected densities of states as pychemia.visual.DensityOfStates objects,
        one element per atom
"""
ret = []
atoms = np.arange(self.initial_structure.natom, dtype=int)
dos_projected, info = self._get_dos_projected(atoms=atoms)
if dos_projected is None:
return None
ndos = len(dos_projected[list(dos_projected.keys())[0]]['energies'])
norbital = len(info) - 1
nspin = len(dos_projected[list(dos_projected.keys())[0]].keys()) - 1
info[0] = info[0].capitalize()
labels = []
labels.append(info[0])
if nspin > 1:
for il in info[1:]:
labels.append(il + '-Up')
for il in info[1:]:
labels.append(il + '-Down')
else:
labels = info
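        # each per-atom table below: column 0 holds the energies (shifted by the
        # Fermi level), followed by one block of `norbital` columns per spin channel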
for iatom in dos_projected:
table = np.zeros(shape=(ndos, norbital * nspin + 1))
table[:, 0] = dos_projected[iatom]['energies'] - self.fermi
start = 1
for key in dos_projected[iatom]:
if key == 'energies':
continue
end = start + norbital
table[:, start:end] = dos_projected[iatom][key]
start = end
temp_dos = DensityOfStates(
table, title='Projected Density Of States %s' %
iatom, labels=labels)
ret.append(temp_dos)
return ret
def dos_parametric(self, atoms=None, orbitals=None, spin=None, title=
|
odoo-jarsa/addons-jarsa
|
connector_cva/tests/test_product_template.py
|
Python
|
agpl-3.0
| 1,627
| 0
|
# -*- coding: utf-8 -*-
# © <2016> <Jarsa Sistemas, S.A. de C.V.>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
from mock import MagicMock
from lxml import etree
import requests
class TestProductTemplate(TransactionCase):
"""
This will test model product.template
"""
def setUp(self):
"""
Define global variables
"""
super(TestProductTemplate, self).setUp()
self.cva = self.env['cva.config.settings']
self.xml = requests.get('http://localhost:8069/connector_cva/static/'
'src/xml/test.xml').content
def test_10_update_price_multi(self):
"""
        test for method update_price_multi
"""
product_tem = self.cva.create_product(etree.XML(self.xml)[1])
        product = product_tem.with_context(
{'active_ids': product_tem.ids})
product.update_price_multi()
product_template = self.cva.create_product(etree.XML(self.xml)[0])
cva = self.cva.create({
'name': '40762',
'main_location': self.env.ref('connector_cva.loc_torreon').id})
cva.execute()
cva.connect_cva = MagicMock()
cva.connect_cva.return_value = etree.XML(self.xml)
product = product_template.with_context(
{'active_ids': product_template.ids})
product.write({
'standard_price': 0.00,
})
product.update_price_multi()
self.assertEqual(product.standard_price, 114.94,
'Product is not Update')
|
chengzhoukun/LeetCode
|
112. Path Sum/Path Sum.py
|
Python
|
lgpl-3.0
| 599
| 0.001669
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @param {integer} sum
# @return {boolean}
def hasPathSum(self, root, sum):
if root is None:
return False
        elif root.val == sum and root.left is None and root.right is None:
return True
else:
return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(
root.right, sum - root.val
)
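# Example usage sketch (assumes a TreeNode class like the one described above):
#     root = TreeNode(5)
#     root.left, root.right = TreeNode(4), TreeNode(8)
#     root.left.left = TreeNode(11)
#     root.left.left.right = TreeNode(2)
#     Solution().hasPathSum(root, 22)  # True: 5 -> 4 -> 11 -> 2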
|
kiriappeee/reply-later
|
src/core/tests/TestMessageSender.py
|
Python
|
mit
| 14,145
| 0.011453
|
import unittest
from unittest.mock import Mock, patch
import copy
from datetime import timezone, timedelta, datetime
from ..reply.Reply import Reply
from ..reply import ReplyCRUD
from ..user.User import User
from ..messager import TweetAdapter, MessageBreaker, MessageSender
from ..data import DataConfig
from ..scheduler import Scheduler
class TestMessageSender(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#example of how to patch a class
"""
@patch('tweepy.API.update_status')
def test_tweetLibraryCanBePatched(self, patchClass):
patchClass.return_value = "success"
self.assertTrue(True)
self.assertEqual(TweetAdapter.updateStatus("abc", "bad"), "success")
self.assertEqual(patchClass.call_args[0], ("abc", "bad"))
"""
def test_messageIsBrokenDownCorrectlyBeforeSending(self):
user = User('test', '123456-012e1', '123h4123asdhh123', timezone(timedelta(hours = 5, minutes = 30)))
mockUserDataStrategy = Mock()
mockUserDataStrategyAttrs = {"getUserById.return_value": user }
mockUserDataStrategy.configure_mock(**mockUserDataStrategyAttrs)
d = datetime.now(tz = timezone(timedelta(hours=5, minutes=30))) + timedelta(minutes=20)
replyToSend = Reply(1, "@example an example message", d, timezone(timedelta(hours=5, minutes=30)), 134953292, replyId = 1)
self.assertEqual(MessageBreaker.breakMessage(replyToSend.message, replyToSend.tweetId, 1, mockUserDataStrategy), ["@example an example message"])
@patch.object(TweetAdapter, 'getUrlLengths')
@patch.object(TweetAdapter, 'getUsernameForTweet')
def test_messageIsBrokenDownCorrectlyBeforeSendingWhenContainingLinks(self, patchMethod, urlLengthPatch):
user = User('test', '123456-012e1', '123h4123asdhh123', timezone(timedelta(hours = 5, minutes = 30)))
patchMethod.return_value = "example"
urlLengthPatch.return_value = (23,23)
mockUserDataStrategy = Mock()
mockUserDataStrategyAttrs = {"getUserById.return_value": user }
mockUserDataStrategy.configure_mock(**mockUserDataStrategyAttrs)
d = datetime.now(tz = timezone(timedelta(hours=5, minutes=30))) + timedelta(minutes=20)
replyToSend = Reply(1, "... Facebook biased - when did people give up on the process of getting informed? http://blog.theoldreader.com/post/144197778539/facebook-biased via @theoldreader", d, timezone(timedelta(hours=5, minutes=30)), 134953292, replyId = 1)
self.assertEqual(MessageBreaker.breakMessage(replyToSend.message, replyToSend.tweetId, 1, mockUserDataStrategy), ["... Facebook biased - when did people give up on the process of getting informed? http://blog.theoldreader.com/post/144197778539/facebook-biased via @theoldreader"])
@patch.object(TweetAdapter, 'getUrlLengths')
@patch.object(TweetAdapter, 'getUsernameForTweet')
def test_messageIsBrokenDownCorrectlyWhenMoreThan140Chars(self, patchMethod, urlLengthPatch):
patchMethod.return_value = "example"
urlLengthPatch.return_value = (23,23)
user = User('test', '123456-012e1', '123h4123asdhh123', timezone(timedelta(hours = 5, minutes = 30)))
mockUserDataStrategy = Mock()
mockUserDataStrategyAttrs = {"getUserById.return_value": user }
mockUserDataStrategy.configure_mock(**mockUserDataStrategyAttrs)
d = datetime.now(tz = timezone(timedelta(hours=5, minutes=30))) + timedelta(minutes=20)
replyToSend = Reply(1,
"@example an example message that is just way too long to be kept inside a single tweet. Therefore it will be broken down into lots of little messages each having the example username on top of it. Sounds cool? Keep going! I'd really like to make this message about 3 tweets long so that I can make sure that the module is working properly. Like really well.",
d, timezone(timedelta(hours=5, minutes=30)), 134953292, replyId = 1)
self.assertEqual(MessageBreaker.breakMessage(replyToSend.message, replyToSend.tweetId, 1, mockUserDataStrategy), ["@example an example message that is just way too long to be kept inside a single tweet. Therefore it will be broken down into lots of little",
"@example messages each having the example username on top of it. Sounds cool? Keep going! I'd really like to make this message about 3",
"@example tweets long so that I can make sure that the module is working properly. Like really well."])
patchMethod.assert_any_call(134953292, 1, mockUserDataStrategy)
replyToSend.message = "@example testing what happens to long tweets with links. There are 50 characters here that I will insert now I'm just interested in seeing what gets truncated after 140 characters"
self.assertEqual(MessageBreaker.breakMessage(replyToSend.message, replyToSend.tweetId, 1, mockUserDataStrategy),
["@example testing what happens to long tweets with links. There are 50 characters here that I will insert now I'm just interested in seeing", "@example what gets truncated after 140 characters"])
@patch.object(TweetAdapter, 'getUrlLengths')
@patch.object(TweetAdapter, 'getUsernameForTweet')
def test_messageIsBrokenDownCorrectlyWhenMoreThan140CharsAndContainsLinks(self, patchMethod, urlLengthPatch):
patchMethod.return_value = "example"
urlLengthPatch.return_value = (23,23)
user = User('test', '123456-012e1', '123h4123asdhh123', timezone(timedelta(hours = 5, minutes = 30)))
mockUserDataStrategy = Mock()
mockUserDataStrategyAttrs = {"getUserById.return_value": user }
mockUserDataStrategy.configure_mock(**mockUserDataStrategyAttrs)
d = datetime.now(tz = timezone(timedelta(hours=5, minutes=30))) + timedelta(minutes=20)
replyToSend = Reply(1,
"@example an example message that is just way too long to be kept inside a single tweet. It also contains a link to http://replylater.adnanissadeen.com that should become shortened. Therefore it will be broken down into lots of little messages each having the example username on top of it. Sounds cool? Keep going! Throw in one more link like https://blog.bufferapp.com/twitter-polls for good measure (also, https). I'd really like to make this message more than 3 tweets long so that I can make sure that the module is working properly. Like really well.",
d, timezone(timedelta(hours=5, minutes=30)), 134953292, replyId = 1)
m1 = "@example an example message that is just way too long to be kept inside a single tweet. It also contains a link to http://replylater.adnanissadeen.com"
m2 = "@example that should become shortened. Therefore it will be broken down into lots of little messages each having the example username on top"
m3 = "@example of it. Sounds cool? Keep going! Throw in one more link like https://blog.bufferapp.com/twitter-polls for good measure (also, https). I'd really like"
m4 = "@example to make this message more than 3 tweets long so that I can make sure that the module is working properly. Like really well."
self.assertEqual(MessageBreaker.breakMessage(replyToSend.message, replyToSend.tweetId, 1, mockUserDataStrategy), [m1,m2,m3,m4])
patchMethod.assert_any_call(134953292, 1, mockUserDataStrategy)
@patch.object(TweetAdapter, 'getUrlLengths')
@patch.object(TweetAdapter, 'getUsernameForTweet')
    @patch.object(TweetAdapter, 'sendReply')
def test_messageIsSentCorrectlyWhenUnder140Chars(self, sendReplyPatch, usernameMethod, urlLengthPatch):
sendReplyPatch.side_effect = [1234]
urlLengthPatch.return_value = (23,23)
usernameMethod.return_value = "example"
d = datetime.now(tz = timezone(timedelta(hours=5, minutes=30))) + timedelta(minutes=20)
        replyToSend = Reply(1, "@example an example message", d, timezone(timedelta(hours=5, minutes=30)), 134953292, replyId = 1)
mockReplyDataStrategy = Mock()
mockReplyDataStrategyAttrs = {"getReplyByReplyId.side_effect": [copy.deepcopy(replyToSend),
|
lo0ol/Ultimaker-Cura
|
plugins/LayerView/__init__.py
|
Python
|
agpl-3.0
| 997
| 0.007021
|
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import LayerView, LayerViewProxy
from PyQt5.QtQml import qmlRegisterType, qmlRegisterSingletonType
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": "Layer View",
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("Layer View plugin description", "Provides the Layer view."),
"api": 2
},
"view": {
"name": catalog.i18nc("Layers View mode", "Layers"),
"view_panel": "LayerView.qml"
}
}
def createLayerViewProxy(engine, script_engine):
return LayerViewProxy.LayerViewProxy()
def register(app):
layer_view = LayerView.LayerView()
qmlRegisterSingletonType(LayerViewProxy.LayerViewProxy, "UM", 1, 0, "LayerView", layer_view.getProxy)
return { "view": LayerView.LayerView() }
|
robertwb/incubator-beam
|
sdks/python/apache_beam/testing/load_tests/load_test_metrics_utils.py
|
Python
|
apache-2.0
| 19,588
| 0.006586
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions used for integrating Metrics API into load tests pipelines.
Metrics are sent to BigQuery in the following format:
test_id | submit_timestamp | metric_type | value
The 'test_id' is common for all metrics for one run.
Currently it is possible to have the following metric types:
* runtime
* total_bytes_count
"""
# pytype: skip-file
import json
import logging
import time
import uuid
from typing import Any
from typing import List
from typing import Mapping
from typing import Optional
from typing import Union
import requests
from requests.auth import HTTPBasicAuth
import apache_beam as beam
from apache_beam.metrics import Metrics
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.timestamp import Timestamp
try:
from google.cloud import bigquery # type: ignore
from google.cloud.bigquery.schema import SchemaField
from google.cloud.exceptions import NotFound
except ImportError:
bigquery = None
SchemaField = None
NotFound = None
RUNTIME_METRIC = 'runtime'
COUNTER_LABEL = 'total_bytes_count'
ID_LABEL = 'test_id'
SUBMIT_TIMESTAMP_LABEL = 'timestamp'
METRICS_TYPE_LABEL = 'metric'
VALUE_LABEL = 'value'
SCHEMA = [{
'name': ID_LABEL, 'field_type': 'STRING', 'mode': 'REQUIRED'
},
{
'name': SUBMIT_TIMESTAMP_LABEL,
'field_type': 'TIMESTAMP',
'mode': 'REQUIRED'
},
{
'name': METRICS_TYPE_LABEL,
'field_type': 'STRING',
'mode': 'REQUIRED'
}, {
'name': VALUE_LABEL, 'field_type': 'FLOAT', 'mode': 'REQUIRED'
}]
_LOGGER = logging.getLogger(__name__)
def parse_step(step_name):
"""Replaces white spaces and removes 'Step:' label
Args:
step_name(str): step name passed in metric ParDo
Returns:
lower case step name without namespace and step label
"""
return step_name.lower().replace(' ', '_').strip('step:_')
def split_metrics_by_namespace_and_name(metrics, namespace, name):
"""Splits metrics list namespace and name.
Args:
metrics: list of metrics from pipeline result
namespace(str): filter metrics by namespace
name(str): filter metrics by name
Returns:
two lists - one of metrics which are matching filters
and second of not matching
"""
matching_metrics = []
not_matching_metrics = []
for dist in metrics:
if dist.key.metric.namespace == namespace\
and dist.key.metric.name == name:
matching_metrics.append(dist)
else:
not_matching_metrics.append(dist)
return matching_metrics, not_matching_metrics
def get_generic_distributions(generic_dists, metric_id):
"""Creates flatten list of distributions per its value type.
A generic distribution is the one which is not processed but saved in
the most raw version.
Args:
generic_dists: list of distributions to be saved
metric_id(uuid): id of the current test run
Returns:
list of dictionaries made from :class:`DistributionMetric`
"""
return sum((
get_all_distributions_by_type(dist, metric_id) for dist in generic_dists),
[])
def get_all_distributions_by_type(dist, metric_id):
"""Creates new list of objects with type of each distribution
metric value.
Args:
dist(object): DistributionMetric object to be parsed
metric_id(uuid): id of the current test run
  Returns:
list of :class:`DistributionMetric` objects
"""
submit_timestamp = time.time()
dist_types = ['count', 'max', 'min', 'sum']
distribution_dicts = []
for dist_type in dist_types:
try:
distribution_dicts.append(
get_distribution_dict(dist_type, submit_timestamp, dist, metric_id))
except ValueError:
# Ignore metrics with 'None' values.
continue
return distribution_dicts
def get_distribution_dict(metric_type, submit_timestamp, dist, metric_id):
"""Function creates :class:`DistributionMetric`
Args:
metric_type(str): type of value from distribution metric which will
be saved (ex. max, min, mean, sum)
submit_timestamp: timestamp when metric is saved
dist(object) distribution object from pipeline result
metric_id(uuid): id of the current test run
Returns:
dictionary prepared for saving according to schema
"""
return DistributionMetric(dist, submit_timestamp, metric_id,
metric_type).as_dict()
class MetricsReader(object):
"""
A :class:`MetricsReader` retrieves metrics from pipeline result,
  prepares them for publishers and sets up publishers.
"""
publishers = [] # type: List[Any]
def __init__(
self,
project_name=None,
bq_table=None,
bq_dataset=None,
publish_to_bq=False,
influxdb_options=None, # type: Optional[InfluxDBMetricsPublisherOptions]
namespace=None,
filters=None):
"""Initializes :class:`MetricsReader` .
Args:
project_name (str): project with BigQuery where metrics will be saved
bq_table (str): BigQuery table where metrics will be saved
bq_dataset (str): BigQuery dataset where metrics will be saved
namespace (str): Namespace of the metrics
filters: MetricFilter to query only filtered metrics
"""
self._namespace = namespace
self.publishers.append(ConsoleMetricsPublisher())
check = project_name and bq_table and bq_dataset and publish_to_bq
if check:
bq_publisher = BigQueryMetricsPublisher(
project_name, bq_table, bq_dataset)
self.publishers.append(bq_publisher)
if influxdb_options and influxdb_options.validate():
self.publishers.append(InfluxDBMetricsPublisher(influxdb_options))
else:
_LOGGER.info(
'Missing InfluxDB options. Metrics will not be published to '
'InfluxDB')
self.filters = filters
def publish_metrics(self, result, extra_metrics: dict):
metric_id = uuid.uuid4().hex
metrics = result.metrics().query(self.filters)
# Metrics from pipeline result are stored in map with keys: 'gauges',
# 'distributions' and 'counters'.
    # Under each key there is a list of objects of each metric type. The
    # metrics have to be prepared for publishing: the publishers expect
    # a list of dictionaries matching the schema.
insert_dicts = self._prepare_all_metrics(metrics, metric_id)
insert_dicts += self._prepare_extra_metrics(extra_metrics, metric_id)
if len(insert_dicts) > 0:
for publisher in self.publishers:
publisher.publish(insert_dicts)
def _prepare_extra_metrics(self, extra_metrics: dict, metric_id: str):
ts = time.time()
return [
Metric(ts, metric_id, v, label=k).as_dict() for k,
v in extra_metrics.items()
]
def publish_values(self, labeled_values):
"""The method to publish simple labeled values.
Args:
labeled_values (List[Tuple(str, int)]): list of (label, value)
"""
metric_dicts = [
Metric(time.time(), uuid.uuid4().hex, value, label=label).as_dict()
for label,
value in labeled_values
]
for publisher in self.publishers:
publisher.publish(metric_dicts)
def _prepare_all_metrics(self, metrics, metric_id):
insert_rows = self._get_counters(metrics['counters'], metric_id)
insert_rows += self._get_distributions(metrics['distributions'], metric_id)
return insert_rows
def _ge
|
svanschalkwyk/datafari
|
windows/python/Lib/test/test_popen2.py
|
Python
|
apache-2.0
| 4,315
| 0.002086
|
"""Test script for popen2.py"""
import warnings
warnings.filterwarnings("ignore", ".*popen2 module is deprecated.*",
DeprecationWarning)
warnings.filterwarnings("ignore", "os\.popen. is deprecated.*",
DeprecationWarning)
import os
import sys
import unittest
import popen2
from test.test_support import run_unittest, reap_children
if sys.platform[:4] == 'beos' or sys.platform[:6] == 'atheos':
# Locks get messed up or something. Generally we're supposed
# to avoid mixing "posix" fork & exec with native threads, and
# they may be right about that after all.
raise unittest.SkipTest("popen2() doesn't work on " + sys.platform)
# if we don't have os.popen, check that
# we have os.fork. if not, skip the test
# (by raising an ImportError)
try:
from os import popen
del popen
except ImportError:
    from os import fork
del fork
class Popen2Test(unittest.TestCase):
cmd = "cat"
if os.name == "nt":
cmd = "more"
teststr = "ab cd\n"
# "more" doesn't act the same way across Windows flavors,
# sometimes adding an extra newline at the start or the
# end. So we strip whitespace off both ends for comparison.
expected = teststr.strip()
def setUp(self):
popen2._cleanup()
# When the test runs, there shouldn't be any open pipes
self.assertFalse(popen2._active, "Active pipes when test starts" +
repr([c.cmd for c in popen2._active]))
def tearDown(self):
for inst in popen2._active:
inst.wait()
popen2._cleanup()
self.assertFalse(popen2._active, "popen2._active not empty")
# The os.popen*() API delegates to the subprocess module (on Unix)
import subprocess
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
reap_children()
def validate_output(self, teststr, expected_out, r, w, e=None):
w.write(teststr)
w.close()
got = r.read()
self.assertEqual(expected_out, got.strip(), "wrote %r read %r" %
(teststr, got))
if e is not None:
got = e.read()
self.assertFalse(got, "unexpected %r on stderr" % got)
def test_popen2(self):
r, w = popen2.popen2(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_popen3(self):
if os.name == 'posix':
r, w, e = popen2.popen3([self.cmd])
self.validate_output(self.teststr, self.expected, r, w, e)
r, w, e = popen2.popen3(self.cmd)
self.validate_output(self.teststr, self.expected, r, w, e)
def test_os_popen2(self):
# same test as test_popen2(), but using the os.popen*() API
if os.name == 'posix':
w, r = os.popen2([self.cmd])
self.validate_output(self.teststr, self.expected, r, w)
w, r = os.popen2(["echo", self.teststr])
got = r.read()
self.assertEqual(got, self.teststr + "\n")
w, r = os.popen2(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_os_popen3(self):
# same test as test_popen3(), but using the os.popen*() API
if os.name == 'posix':
w, r, e = os.popen3([self.cmd])
self.validate_output(self.teststr, self.expected, r, w, e)
w, r, e = os.popen3(["echo", self.teststr])
got = r.read()
self.assertEqual(got, self.teststr + "\n")
got = e.read()
self.assertFalse(got, "unexpected %r on stderr" % got)
w, r, e = os.popen3(self.cmd)
self.validate_output(self.teststr, self.expected, r, w, e)
def test_os_popen4(self):
if os.name == 'posix':
w, r = os.popen4([self.cmd])
self.validate_output(self.teststr, self.expected, r, w)
w, r = os.popen4(["echo", self.teststr])
got = r.read()
self.assertEqual(got, self.teststr + "\n")
w, r = os.popen4(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_main():
run_unittest(Popen2Test)
if __name__ == "__main__":
test_main()
|
mortonjt/micronota
|
micronota/bfillings/_base.py
|
Python
|
bsd-3-clause
| 5,337
| 0
|
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from os import makedirs
from abc import ABCMeta, abstractmethod
from tempfile import mkdtemp, NamedTemporaryFile
from inspect import signature
from pandas import DataFrame
from skbio import Sequence
class SubclassImplementError(Exception):
    '''Raised when a subclass does not follow the enforcement.'''
def __init__(self, cls, message=('This class definition violates '
'the enforced rule of its parent class')):
super().__init__('%s: %s' % (message, cls))
class IntervalMetadataPred(metaclass=ABCMeta):
'''
Attributes
----------
fp : str
input file path of fasta seq.
dat : str
data file (eg database) needed to run the app.
out_dir : str
output directory
tmp_dir : str
temp directory
'''
@classmethod
def __subclasshook__(cls, C):
'''Enforce the API of functions in child classes.'''
if cls is IntervalMetadataPred:
f = C.__dict__['_identify_fp']
sig = signature(f)
# enforce it to return dict
if not issubclass(sig.return_annotation, dict):
raise SubclassImplementError(C)
return True
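    # Minimal conforming subclass sketch (hypothetical, not part of micronota):
    # _identify_fp must be annotated to return dict, otherwise the hook above
    # raises SubclassImplementError when the class is checked against this ABC.
    #
    #     class MyPred(IntervalMetadataPred):
    #         def _identify_fp(self, fp, **kwargs) -> dict:
    #             return {}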
    def __init__(self, dat, out_dir, tmp_dir=None):
self.dat = dat
self.out_dir = out_dir
# create dir if not exist
makedirs(self.out_dir, exist_ok=True)
if tmp_dir is None:
self.tmp_dir = mkdtemp(prefix='tmp', dir=out_dir)
else:
self.tmp_dir = tmp_dir
makedirs(self.tmp_dir, exist_ok=True)
    def __call__(self, input, **kwargs) -> dict:
'''Identify features for the input.
Parameters
----------
input : ``skbio.Sequence`` or sequence file.
        Returns
        -------
dict passable to ``skbio.metadata.IntervalMetadata``
'''
if isinstance(input, Sequence):
return self._identify_seq(input, **kwargs)
elif isinstance(input, str):
return self._identify_fp(input, **kwargs)
def _identify_seq(self, seq, **kwargs):
'''Identify features on the input sequence.
Parameters
----------
seq : ``skbio.Sequence`` object
Returns
-------
dict passable to ``skbio.metadata.IntervalMetadata``
'''
        with NamedTemporaryFile('w+', dir=self.tmp_dir) as f:
            seq.write(f)
            return self._identify_fp(f.name, **kwargs)
@abstractmethod
def _identify_fp(self, fp, **kwargs):
'''Identify features on the sequence in the input file.'''
def has_cache(self):
return self.cache is not None
class MetadataPred(metaclass=ABCMeta):
'''
Attributes
----------
dat : list of str
list of data files (eg database) needed to run the app.
out_dir : str
output directory
tmp_dir : str
temp directory
'''
@classmethod
def __subclasshook__(cls, C):
'''Enforce the API of functions in child classes.'''
if cls is MetadataPred:
f = C.__dict__['_annotate_fp']
sig = signature(f)
# enforce it to return dict
if not issubclass(sig.return_annotation, DataFrame):
raise SubclassImplementError(C)
return True
def __init__(self, dat, out_dir, tmp_dir=None):
self.dat = dat
self.out_dir = out_dir
# create dir if not exist
makedirs(self.out_dir, exist_ok=True)
if tmp_dir is None:
self.tmp_dir = mkdtemp(prefix='tmp', dir=out_dir)
else:
self.tmp_dir = tmp_dir
makedirs(self.tmp_dir, exist_ok=True)
def __call__(self, input, **kwargs):
'''
Parameters
----------
input : list of ``skbio.Sequence`` or sequence file.
Returns
-------
pd.DataFrame
row name should be the query seq id. each column is data
of e-value, bitscore, etc. For protein sequences, a column
named 'sseqid' is mandatory to record the seq id of the hit.
'''
if isinstance(input, Sequence):
return self._annotate_seq(input, **kwargs)
elif isinstance(input, str):
return self._annotate_fp(input, **kwargs)
def _annotate_seq(self, seq, **kwargs):
'''Add metadata to the input seq.
Assign the function, product, cross-reference, etc. info to the
input sequence.
Parameters
----------
seq : ``skbio.Sequence`` object
'''
        with NamedTemporaryFile('w+', dir=self.tmp_dir) as f:
            seq.write(f)
            return self._annotate_fp(f.name, **kwargs)
def has_cache(self):
return self.cache is not None
@abstractmethod
def _annotate_fp(self, fp, **kwargs):
'''Add metadata to the sequences in the input file.
Parameters
----------
fp : input file of sequences
'''
|
fkorotkov/pants
|
tests/python/pants_test/bin/test_loader_integration.py
|
Python
|
apache-2.0
| 1,537
| 0.004554
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class LoaderIntegrationTest(PantsRunIntegrationTest):
def test_invalid_locale(self):
pants_run = self.run_pants(command=['help'], extra_env={'LC_ALL': 'iNvALiD-lOcALe'})
self.assert_failure(pants_run)
self.assertIn('Could not get a valid locale.', pants_run.stderr_data)
self.assertIn('iNvALiD-lOcALe', pants_run.stderr_data)
def test_alternate_entrypoint(self):
pants_run = self.run_pants(
command=['help'],
      extra_env={'PANTS_ENTRYPOINT': 'pants.bin.pants_exe:test'}
)
self.assert_success(pants_run)
self.assertIn('T E S T', pants_run.stdout_data)
def test_alternate_entrypoint_bad(self):
pants_run = self.run_pants(command=['help'], extra_env={'PANTS_ENTRYPOINT': 'badness'})
self.assert_failure(pants_run)
self.assertIn('entrypoint must be', pants_run.stderr_data)
def test_alternate_entrypoint_not_callable(self):
pants_run = self.run_pants(
command=['help'],
extra_env={'PANTS_ENTRYPOINT': 'pants.bin.pants_exe:TEST_STR'}
)
self.assert_failure(pants_run)
self.assertIn('TEST_STR', pants_run.stderr_data)
self.assertIn('not callable', pants_run.stderr_data)
|
maning/inasafe
|
realtime/test_shake_event.py
|
Python
|
gpl-3.0
| 24,765
| 0.000202
|
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Shake Event Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tim@linfiniti.com'
__version__ = '0.5.0'
__date__ = '2/08/2012'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import ogr
import os
import shutil
import unittest
import logging
import difflib
import PyQt4
# pylint: disable=E0611
# pylint: disable=W0611
from qgis.core import QgsFeatureRequest
# pylint: enable=E0611
# pylint: enable=W0611
from safe.api import unique_filename, temp_dir
from safe_qgis.utilities_test import getQgisTestApp
from utils import shakemapExtractDir, shakemapZipDir, dataDir
from shake_event import ShakeEvent
# The logger is initialised in utils.py by init
LOGGER = logging.getLogger('InaSAFE')
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestShakeEvent(unittest.TestCase):
"""Tests relating to shake events"""
def setUp(self):
"""Copy our cached dataset from the fixture dir to the cache dir"""
myOutFile = '20120726022003.out.zip'
myInpFile = '20120726022003.inp.zip'
myOutPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'fixtures',
myOutFile))
myInpPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
'fixtures',
myInpFile))
shutil.copyfile(myOutPath, os.path.join(shakemapZipDir(), myOutFile))
shutil.copyfile(myInpPath, os.path.join(shakemapZipDir(), myInpFile))
#TODO Downloaded data should be removed before each test
def test_gridXmlFilePath(self):
"""Test eventFilePath works(using cached data)"""
myShakeId = '20120726022003'
myExpectedPath = os.path.join(shakemapExtractDir(),
myShakeId,
'grid.xml')
myShakeEvent = ShakeEvent(myShakeId)
myPath = myShakeEvent.gridFilePath()
self.assertEquals(myExpectedPath, myPath)
def test_eventParser(self):
"""Test eventFilePath works (using cached data)"""
myShakeId = '20120726022003'
myShakeEvent = ShakeEvent(myShakeId)
self.assertEquals(26, myShakeEvent.day)
self.assertEquals(7, myShakeEvent.month)
self.assertEquals(2012, myShakeEvent.year)
self.assertEquals(2, myShakeEvent.hour)
self.assertEquals(15, myShakeEvent.minute)
self.assertEquals(35, myShakeEvent.second)
self.assertEquals('WIB', myShakeEvent.timeZone)
self.assertEquals(124.45, myShakeEvent.longitude)
self.assertEquals(-0.21, myShakeEvent.latitude)
self.assertEquals(11.0, myShakeEvent.depth)
self.assertEquals('Southern Molucca Sea', myShakeEvent.location)
self.assertEquals(122.45, myShakeEvent.xMinimum)
self.assertEquals(126.45, myShakeEvent.xMaximum)
self.assertEquals(-2.21, myShakeEvent.yMinimum)
self.assertEquals(1.79, myShakeEvent.yMaximum)
myGridXmlData = myShakeEvent.mmiData
self.assertEquals(25921, len(myGridXmlData))
myDelimitedString = myShakeEvent.mmiDataToDelimitedText()
self.assertEqual(578234, len(myDelimitedString))
def test_eventGridToCsv(self):
"""Test grid data can be written to csv"""
myShakeId = '20120726022003'
myShakeEvent = ShakeEvent(myShakeId)
myPath = myShakeEvent.mmiDataToDelimitedFile(theForceFlag=True)
myFile = file(myPath, 'rt')
myString = myFile.readlines()
myFile.close()
self.assertEqual(25922, len(myString))
def testEventToRaster(self):
"""Check we can convert the shake event to a raster"""
myShakeId = '20120726022003'
myShakeEvent = ShakeEvent(myShakeId)
myExpectedState = """latitude: -0.21
longitude: 124.45
eventId: 20120726022003
magnitude: 5.0
depth: 11.0
description: None
location: Southern Molucca Sea
day: 26
month: 7
year: 2012
time: None
timeZone: WIB
xMinimum: 122.45
xMaximum: 126.45
yMinimum: -2.21
yMaximum: 1.79
rows: 161.0
columns: 161.0
mmiData: Populated
populationRasterPath: None
impactFile: None
impactKeywordsFile: None
fatalityCounts: None
displacedCounts: None
affectedCounts: None
extentWithCities: Not set
zoomFactor: 1.25
searchBoxes: None
"""
myState = str(myShakeEvent)
myMessage = (('Expected:\n----------------\n%s'
'\n\nGot\n------------------\n%s\n') %
(myExpectedState, myState))
assert myState == myExpectedState, myMessage
myPath = myShakeEvent.mmiDataToRaster(theForceFlag=True)
assert os.path.exists(myPath)
myExpectedQml = myPath.replace('tif', 'qml')
assert os.path.exists(myExpectedQml)
myExpectedKeywords = myPath.replace('tif', 'keywords')
assert os.path.exists(myExpectedKeywords)
def testEventToShapefile(self):
"""Check we can convert the shake event to a raster"""
myShakeId = '20120726022003'
myShakeEvent = ShakeEvent(myShakeId)
myPath = myShakeEvent.mmiDataToShapefile(theForceFlag=True)
assert os.path.exists(myPath)
myExpectedQml = myPath.replace('shp', 'qml')
myMessage = '%s not found' % myExpectedQml
assert os.path.exists(myExpectedQml), myMessage
def checkFeatureCount(self, thePath, theCount):
myDataSource = ogr.Open(thePath)
myBaseName = os.path.splitext(os.path.basename(thePath))[0]
# do a little query to make sure we got some results...
mySQL = 'select * from \'%s\' order by MMI asc' % myBaseName
#print mySQL
myLayer = myDataSource.ExecuteSQL(mySQL)
myCount = myLayer.GetFeatureCount()
myFlag = myCount == theCount
myMessage = ''
if not myFlag:
myMessage = 'Expected %s features, got %s' % (theCount, myCount)
myDataSource.ReleaseResultSet(myLayer)
myDataSource.Destroy()
return myFlag, myMessage
def testEventToContours(self):
"""Check we can extract contours from the event"""
myShakeId = '20120726022003'
myShakeEvent = ShakeEvent(myShakeId)
        myPath = myShakeEvent.mmiDataToContours(theForceFlag=True,
theAlgorithm='invdist')
        myFlag, myMessage = self.checkFeatureCount(myPath, 16)
        assert myFlag, myMessage
assert os.path.exists(myPath)
myExpectedQml = myPath.replace('shp', 'qml')
myMessage = '%s not found' % myExpectedQml
assert os.path.exists(myExpectedQml), myMessage
myPath = myShakeEvent.mmiDataToContours(theForceFlag=True,
theAlgorithm='nearest')
        myFlag, myMessage = self.checkFeatureCount(myPath, 132)
        assert myFlag, myMessage
myPath = myShakeEvent.mmiDataToContours(theForceFlag=True,
theAlgorithm='average')
        myFlag, myMessage = self.checkFeatureCount(myPath, 132)
        assert myFlag, myMessage
def testLocalCities(self):
"""Test that we can retrieve the cities local to the event"""
myShakeId = '20120726022003'
myShakeEvent = ShakeEvent(myShakeId)
        # Get the memory layer
myCitiesLayer = myShakeEvent.localCitiesMemoryLayer()
myProvider = myCitiesLayer.dataProvider()
myExpectedFeatureCount = 6
self.assertEquals(myProvider.featureCount(), myExpectedFeatureCount)
myStrings = []
myRequest = QgsFeatureRequest()
for myFeature in myCitiesLayer.getFeatures(myRequest):
# fetch map of attributes
myAttributes = myCitiesLayer.dataProvider().attributeIndexes()
for myKey in myAttributes:
|
cjhak/b2share
|
invenio/modules/records/views.py
|
Python
|
gpl-2.0
| 13,449
| 0.000446
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSearch Flask Blueprint."""
import cStringIO
from functools import wraps
from flask import g, render_template, request, flash, redirect, url_for, \
current_app, abort, Blueprint, send_file
from flask_breadcrumbs import register_breadcrumb
from flask_breadcrumbs import default_breadcrumb_root
from flask_login import current_user
from flask_menu import register_menu
from invenio.base.decorators import wash_arguments
from invenio.base.globals import cfg
from invenio.base.i18n import _
from invenio.base.signals import pre_template_render
from invenio.config import CFG_SITE_RECORD
from invenio.ext.template.context_processor import \
register_template_context_processor
from invenio.modules.search.models import Collection
from invenio.modules.search.signals import record_viewed
from invenio.utils import apache
from .api import get_record
from .models import Record as Bibrec
from .utils import references_nb_counts, citations_nb_counts, \
visible_collection_tabs
blueprint = Blueprint('record', __name__, url_prefix="/" + CFG_SITE_RECORD,
static_url_path='/record', template_folder='templates',
static_folder='static')
default_breadcrumb_root(blueprint, 'breadcrumbs.record')
def request_record(f):
"""Perform standard operation to check record availability for user."""
@wraps(f)
def decorated(recid, *args, **kwargs):
from invenio.modules.access.mailcookie import \
mail_cookie_create_authorize_action
from invenio.modules.access.local_config import VIEWRESTRCOLL
from invenio.legacy.search_engine import \
guess_primary_collection_of_a_record, \
check_user_can_view_record
from invenio.b2share.modules.main.utils import check_fresh_record
# ensure recid to be integer
recid = int(recid)
from invenio.legacy.search_engine import record_exists, get_merged_recid
if record_exists(recid) == 0:
# record doesn't exist, abort so it doesn't get incorrectly cached
abort(apache.HTTP_NOT_FOUND) # The record is gone!
if check_fresh_record(current_user, recid):
return render_template('record_waitforit.html', recid=recid)
g.collection = collection = Collection.query.filter(
Collection.name == guess_primary_collection_of_a_record(recid)).\
one()
(auth_code, auth_msg) = check_user_can_view_record(current_user, recid)
# only superadmins can use verbose parameter for obtaining debug
# information
if not current_user.is_super_admin and 'verbose' in kwargs:
kwargs['verbose'] = 0
if auth_code and current_user.is_guest:
cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {
'collection': g.collection.name})
url_args = {'action': cookie, 'ln': g.ln, 'referer': request.url}
flash(_("Authorization failure"), 'error')
return redirect(url_for('webaccount.login', **url_args))
elif auth_code:
flash(auth_msg, 'error')
abort(apache.HTTP_UNAUTHORIZED)
from invenio.legacy.search_engine import record_exists, \
get_merged_recid
# check if the current record has been deleted
# and has been merged, case in which the deleted record
# will be redirect to the new one
record_status = record_exists(recid)
merged_recid = get_merged_recid(recid)
if record_status == -1 and merged_recid:
return redirect(url_for('record.metadata', recid=merged_recid))
elif record_status == -1:
abort(apache.HTTP_GONE) # The record is gone!
g.bibrec = Bibrec.query.get(recid)
record = get_record(recid)
if record is None:
return render_template('404.html')
title = record.get(cfg.get('RECORDS_BREADCRUMB_TITLE_KEY'), '')
tabs = []
if cfg.get('CFG_WEBLINKBACK_TRACKBACK_ENABLED'):
@register_template_context_processor
def trackback_context():
from invenio.legacy.weblinkback.templates import \
get_trackback_auto_discovery_tag
return {'headerLinkbackTrackbackLink':
get_trackback_auto_discovery_tag(recid)}
def _format_record(recid, of='hd', user_info=current_user, *args,
**kwargs):
from invenio.modules.formatter import format_record
return format_record(recid, of, user_info=user_info, *args,
**kwargs)
@register_template_context_processor
def record_context():
from invenio.modules.comments.api import get_mini_reviews
from invenio.legacy.bibdocfile.api import BibRecDocs
all_files = [f for f in BibRecDocs(recid, human_readable=True).list_latest_files(list_hidden=False) \
if not f.is_icon()]
files = [f
for f in all_files if f.is_restricted(current_user)[0] == 0]
has_private_files = len(files) < len(all_files)
return dict(recid=recid,
record=record,
tabs=tabs,
title=title,
get_mini_reviews=get_mini_reviews,
collection=collection,
format_record=_format_record,
has_private_files=has_private_files,
files=files
)
pre_template_render.send(
"%s.%s" % (blueprint.name, f.__name__),
recid=recid,
)
return f(recid, *args, **kwargs)
return decorated
@blueprint.route('/<int:recid>/metadata', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>', methods=['GET', 'POST'])
@blueprint.route('/<int:recid>/export/<of>', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.', _('Record'))
@wash_arguments({'of': (unicode, 'hd'), 'ot': (unicode, None)})
@request_record
@register_menu(blueprint, 'record.metadata', _('Information'), order=1,
endpoint_arguments_constructor=lambda:
dict(recid=request.view_args.get('recid')),
visible_when=visible_collection_tabs('metadata'))
def metadata(recid, of='hd', ot=None):
"""Display formated record metadata."""
from invenio.legacy.bibrank.downloads_similarity import \
register_page_view_event
from invenio.modules.formatter import get_output_format_content_type
register_page_view_event(recid, current_user.get_id(),
str(request.remote_addr))
if get_output_format_content_type(of) != 'text/html':
from invenio.modules.search.views.search import \
response_formated_records
return response_formated_records([recid], g.collection, of, qid=None)
# Send the signal 'document viewed'
record_viewed.send(
current_app._get_current_object(),
recid=recid,
id_user=current_user.get_id(),
request=request)
from invenio.b2share.modules.b2deposit.edit import is_record_editable
return render_template('records/metadata.html', of=of, ot=ot,
editable=is_
|
charismab45515t/dtbs-carrent
|
customer_auto_email/__openerp__.py
|
Python
|
gpl-2.0
| 624
| 0.016026
|
# -*- coding: utf-8 -*-
{
"name" : "Automatical email to customer",
"version" : "1.0",
"author" : "DTBS",
"description": """
    Sends an automatic email when a customer is created
""",
"website" : "http://dtbsindo.web.id",
"category" : "",
# 'depends': ['base','csmart_base','sale_stock'],
    'depends': ['base','csmart_base','sale'],
"data" : [
'views/email_template_customer_auto.xml',
'views/configuration.xml',
'views/configuration_data.xml',
'views/menu.xml'
],
'js': [],
'css': [],
'qweb': [],
"active": False,
"installable": True,
}
|
rnixx/kivy
|
kivy/uix/pagelayout.py
|
Python
|
mit
| 7,321
| 0
|
"""
PageLayout
==========
.. image:: images/pagelayout.gif
:align: right
The :class:`PageLayout` class is used to create a simple multi-page
layout, in a way that allows easy flipping from one page to another using
borders.
:class:`PageLayout` does not currently honor the
:attr:`~kivy.uix.widget.Widget.size_hint`,
:attr:`~kivy.uix.widget.Widget.size_hint_min`,
:attr:`~kivy.uix.widget.Widget.size_hint_max`, or
:attr:`~kivy.uix.widget.Widget.pos_hint` properties.
.. versionadded:: 1.8.0
Example:
.. code-block:: kv
PageLayout:
Button:
text: 'page1'
Button:
text: 'page2'
Button:
text: 'page3'
Transitions from one page to the next are made by swiping in from the border
areas on the right or left hand side. If you wish to display multiple widgets
in a page, we suggest you use a containing layout. Ideally, each page should
consist of a single :mod:`~kivy.uix.layout` widget that contains the remaining
widgets on that page.
"""
__all__ = ('PageLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, DictProperty
from kivy.animation import Animation
class PageLayout(Layout):
'''PageLayout class. See module documentation for more information.
'''
page = NumericProperty(0)
'''The currently displayed page.
:data:`page` is a :class:`~kivy.properties.NumericProperty` and defaults
to 0.
'''
border = NumericProperty('50dp')
'''The width of the border around the current page used to display
the previous/next page swipe areas when needed.
:data:`border` is a :class:`~kivy.properties.NumericProperty` and
defaults to 50dp.
'''
swipe_threshold = NumericProperty(.5)
'''The threshold used to trigger swipes as ratio of the widget
size.
:data:`swipe_threshold` is a :class:`~kivy.properties.NumericProperty`
and defaults to .5.
'''
anim_kwargs = DictProperty({'d': .5, 't': 'in_quad'})
'''The animation kwargs used to construct the animation
:data:`anim_kwargs` is a :class:`~kivy.properties.DictProperty`
and defaults to {'d': .5, 't': 'in_quad'}.
.. versionadded:: 1.11.0
'''
def __init__(self, **kwargs):
super(PageLayout, self).__init__(**kwargs)
trigger = self._trigger_layout
fbind = self.fbind
fbind('border', trigger)
fbind('page', trigger)
fbind('parent', trigger)
fbind('children', trigger)
fbind('size', trigger)
fbind('pos', trigger)
def do_layout(self, *largs):
l_children = len(self.children) - 1
h = self.height
x_parent, y_parent = self.pos
p = self.page
border = self.border
half_border = border / 2.
right = self.right
width = self.width - border
for i, c in enumerate(reversed(self.children)):
if i < p:
x = x_parent
elif i == p:
if not p: # it's first page
x = x_parent
elif p != l_children: # not first, but there are post pages
x = x_parent + half_border
else: # not first and there are no post pages
x = x_parent + border
elif i == p + 1:
if not p: # second page - no left margin
x = right - border
else:
# there's already a left margin
x = right - half_border
else:
x = right
c.height = h
c.width = width
Animation(
x=x,
y=y_parent,
**self.anim_kwargs).start(c)
def on_touch_down(self, touch):
if (
self.disabled or
not self.collide_point(*touch.pos) or
            not self.children
):
return
page = self.children[-self.page - 1]
if self.x <= touch.x < page.x:
touch.ud['page'] = 'previous'
touch.grab(self)
return True
elif page.right <= touch.x < self.right:
touch.ud['page'] = 'next'
touch.grab(self)
return True
return page.on_touch_down(touch)
def on_touch_move(self, touch):
if touch.grab_current != self:
return
p = self.page
border = self.border
half_border = border / 2.
page = self.children[-p - 1]
if touch.ud['page'] == 'previous':
# move next page up to right edge
if p < len(self.children) - 1:
self.children[-p - 2].x = min(
self.right - self.border * (1 - (touch.sx - touch.osx)),
self.right)
# move current page until edge hits the right border
if p >= 1:
b_right = half_border if p > 1 else border
b_left = half_border if p < len(self.children) - 1 else border
self.children[-p - 1].x = max(min(
self.x + b_left + (touch.x - touch.ox),
self.right - b_right),
self.x + b_left)
# move previous page left edge up to left border
if p > 1:
self.children[-p].x = min(
self.x + half_border * (touch.sx - touch.osx),
self.x + half_border)
elif touch.ud['page'] == 'next':
# move current page up to left edge
if p >= 1:
self.children[-p - 1].x = max(
self.x + half_border * (1 - (touch.osx - touch.sx)),
self.x)
            # move next page until its edge hits the left border
if p < len(self.children) - 1:
b_right = half_border if p >= 1 else border
b_left = half_border if p < len(self.children) - 2 else border
self.children[-p - 2].x = min(max(
self.right - b_right + (touch.x - touch.ox),
self.x + b_left),
self.right - b_right)
# move second next page up to right border
if p < len(self.children) - 2:
self.children[-p - 3].x = max(
self.right + half_border * (touch.sx - touch.osx),
self.right - half_border)
return page.on_touch_move(touch)
def on_touch_up(self, touch):
if touch.grab_current == self:
if (
touch.ud['page'] == 'previous' and
abs(touch.x - touch.ox) / self.width > self.swipe_threshold
):
self.page -= 1
elif (
touch.ud['page'] == 'next' and
abs(touch.x - touch.ox) / self.width > self.swipe_threshold
):
self.page += 1
else:
self._trigger_layout()
touch.ungrab(self)
if len(self.children) > 1:
return self.children[-self.page + 1].on_touch_up(touch)
if __name__ == '__main__':
from kivy.base import runTouchApp
from kivy.uix.button import Button
pl = PageLayout()
for i in range(1, 4):
b = Button(text='page%s' % i)
pl.add_widget(b)
runTouchApp(pl)
|
os-cloud-storage/openstack-workload-disaster-recovery
|
dragon/workload_policy/actions/plugins/instance_image_action.py
|
Python
|
apache-2.0
| 6,714
| 0.000149
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
import StringIO
import os
import socket
from dragon.engine.clients import Clients
from dragon.openstack.common import log as logging
from dragon.openstack.common import exception
from dragon.workload_policy.actions import action
from dragon.workload_policy.actions import action_execution as ae
from oslo.config import cfg
import dragon.openstack.common.uuidutils as uuidutils
from dragon.template.heat_template import InstanceResource
LOG = logging.getLogger(__name__)
instance_image_opts = [
cfg.IntOpt('backup_image_object_size',
default=52428800,
help='The size in bytes of instance image objects')
]
CONF = cfg.CONF
CONF.register_opts(instance_image_opts)
class InstanceImageAction(action.Action):
is_global = True # store in global container
def __init__(self, context):
self.clients = Clients(context)
self._image_id = None
self._name = None
self._resource_id = None
self.data_block_size_bytes = CONF.backup_image_object_size
# super(action.Action, self).__init__(workload_action_excution_id)
def pre_protect(self, cntx, workload_action_excution_id,
resource_id):
pass
def post_protect(self, cntx, workload_action_excution_id,
resource_id):
pass
def protect(self, cntx, workload_action_excution_id,
resource_id, container_name):
LOG.debug("protecting instance (image copied) %s" % (resource_id))
instance = self.clients.nova().servers.get(resource_id)
self._image_id = instance.image['id']
self._name = instance.name
self._resource_id = resource_id
instance_copy_execution =\
ae.ActionExecution(workload_action_excution_id,
resource_id, self.id)
result = self._imagecopy(cntx, instance, container_name,
instance_copy_execution)
return result
    def generate_template(self, context, template_gen):
instance = InstanceResource(self._image_id, self._name, resource_id=self._resource_id)
template_gen.add_instance(instance)
def failover(self, context, resource_id, resource_data, container_name):
return self._import_from_swift(context, resource_id,
resource_data, container_name)
def _import_from_swift(self, context, resource_id,
                           resource_data, container_name):
LOG.debug("resource %s data %s container %s" %
(resource_id, resource_data, container_name))
swift_client = self.clients.swift()
data_chunks = resource_data["chunks"]
image_id = resource_data["image_id"]
image_response_data = StringIO.StringIO()
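        # Reassemble the image by concatenating its numbered chunk objects
        # ("<image_id>_0", "<image_id>_1", ...) from the backup container.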
for chunk in range(data_chunks):
swift_meta, image_response =\
swift_client.get_object(container_name,
image_id + "_" + str(chunk))
image_response_data.write(image_response)
try:
image = {}
image['name'] = resource_data["meta"]["name"]
image['size'] = resource_data["meta"]["size"]
image['disk_format'] = resource_data["meta"]["disk_format"]
image['container_format'] =\
resource_data["meta"]["container_format"]
image['id'] = uuidutils.generate_uuid()
image_response_data.seek(0, os.SEEK_SET)
self.clients.glance().images.create(data=image_response_data,
**image)
self._image_id = image['id']
self._name = resource_data["instance_name"]
return True
# except ImageAlreadyPresentException:
except Exception, e:
LOG.error(e)
return False
def _imagecopy(self, context, instance, container_name, action_excution):
backup_rec = {}
action_excution.set_status(context, 'uploaded to swift')
swift_conn = Clients(context).swift()
headers = {'X-Container-Meta-dr_state': 'processing'}
image = self.clients.glance().images.get(self._image_id)
# take the checksum as unique id
global_container_image_id = image._info['checksum']
image_response = image.data()
image_response_data = StringIO.StringIO()
for chunk in image_response:
image_response_data.write(chunk)
image_response_data.seek(0, os.SEEK_SET)
chunks = 0
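        # Upload the image in fixed-size chunks (backup_image_object_size bytes);
        # each Swift object is named "<image checksum>_<chunk index>".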
while True:
data = image_response_data.read(self.data_block_size_bytes)
data_offset = image_response_data.tell()
LOG.debug("uploading image offset %s chunks %s"
% (data_offset, chunks))
if data == '':
break
try:
swift_conn.put_object(container_name,
global_container_image_id + "_" +
str(chunks),
data,
content_length=len(data))
chunks += 1
except socket.error as err:
dr_state = 'DR image backup failed'
action_excution.set_status(context, dr_state)
raise exception.SwiftConnectionFailed(reason=str(err))
dr_state = 'Protected'
backup_rec["metadata"] = instance.metadata
backup_rec["image_id"] = global_container_image_id
backup_rec["instance_name"] = self._name
backup_rec["meta"] = image.to_dict()
backup_rec["chunks"] = chunks
action_excution.set_status(context, dr_state)
return dr_state, backup_rec
|
monk-ee/puppetdb-python
|
puppetdb/v3/aggregate_event_counts.py
|
Python
|
mit
| 6,665
| 0.005553
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Arcus, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
aggregate_event_counts.py: A bunch of API methods for interacting with v3 aggregate event counts in the PuppetDB API.
Operators
See the Operators page for the full list of available operators. Note that inequality operators (<, >, <=, >=) are only supported against the timestamp FIELD.
Fields
FIELD may be any of the following. Unless otherwise noted, all fields support both equality and regular expression match operators, but do not support inequality operators.
certname: the name of the node that the event occurred on.
report: the id of the report that the event occurred in; these ids can be acquired via event queries or via the /reports query endpoint.
status: the status of the event; legal values are success, failure, noop, and skipped.
timestamp: the timestamp (from the puppet agent) at which the event occurred. This field supports the inequality operators. All values should be specified as ISO-8601 compatible date/time strings.
run-start-time: the timestamp (from the puppet agent) at which the puppet run began. This field supports the inequality operators. All values should be specified as ISO-8601 compatible date/time strings.
run-end-time: the timestamp (from the puppet agent) at which the puppet run finished. This field supports the inequality operators. All values should be specified as ISO-8601 compatible date/time strings.
report-receive-time: the timestamp (from the PuppetDB server) at which the puppet report was received. This field supports the inequality operators. All values should be specified as ISO-8601 compatible date/time strings.
resource-type: the type of resource that the event occurred on; e.g., File, Package, etc.
resource-title: the title of the resource that the event occurred on
property: the property/parameter of the resource that the event occurred on; e.g., for a Package resource, this field might have a value of ensure. NOTE: this field may contain NULL values; see notes below.
new-value: the new value that Puppet was attempting to set for the specified resource property. NOTE: this field may contain NULL values; see notes below.
old-value: the previous value of the resource property, which Puppet was attempting to change. NOTE: this field may contain NULL values; see notes below.
message: a description (supplied by the resource provider) of what happened during the event. NOTE: this field may contain NULL values; see notes below.
file: the manifest file in which the resource definition is located. NOTE: this field may contain NULL values; see notes below.
line: the line (of the containing manifest file) at which the resource definition can be found. NOTE: this field may contain NULL values; see notes below.
containing-class: the Puppet class where this resource is declared. NOTE: this field may contain NULL values; see notes below.
latest-report?: whether the event occurred in the most recent Puppet run (per-node). NOTE: the value of this field is always boolean (true or false without quotes), and it is not supported by the regex match operator.
Notes on fields that allow NULL values
In the case of a skipped resource event, some of the fields of an event may not have values. We handle this case in a slightly special way when these fields are used in equality (=) or inequality (!=) queries; specifically, an equality query will always return false for an event with no value for the field, and an inequality query will always return true.
The response is a single JSON map containing aggregated event-count information and a total for how many event-count results were aggregated.
{
"successes": 2,
"failures": 0,
"noops": 0,
"skips": 1,
"total": 3
}
"""
__author__ = "monkee"
__version__ = "1.0.1"
__maintainer__ = "monk-ee"
__email__ = "magic.monkee.magic@gmail.com"
__status__ = "Development"
from puppetdb import utils
API_VERSION = 'v3'
def get_aggregate_event_counts(api_url=None, query='', summarize_by='', count_by='', counts_filter='', distinct_resources='', verify=False, cert=list()):
"""
    Returns aggregated event counts
:param api_url: Base PuppetDB API url
:param query: Required. A JSON array of query predicates in prefix form (["<OPERATOR>", "<FIELD>", "<VALUE>"]). This query is forwarded to the events endpoint - see there for additional documentation.
:param summarize_by: Required. A string specifying which type of object you’d like to see counts for. Supported values are resource, containing-class, and certname.
:param count_by: Optional. A string specifying what type of object is counted when building up the counts of successes, failures, noops, and skips. Supported values are resource (default) and certname.
:param counts_filter: Optional. A JSON array of query predicates in the usual prefix form. This query is applied to the final event counts output. Supported operators are =, >, <, >=, and <=. Supported fields are failures, successes, noops, and skips.
:param distinct_resources: Optional. (EXPERIMENTAL: it is possible that the behavior of this parameter may change in future releases.) This parameter is passed along to the event query - see there for additional documentation.
Response Format
{
"successes": 2,
"failures": 0,
"noops": 0,
"skips": 1,
"total": 3
}
"""
return utils._make_api_request(api_url, '/aggregate-event-counts', verify, cert, params={'query': query,'summarize-by':summarize_by,'count-by':count_by,'counts-filter':counts_filter,'distinct_resources':distinct_resources})
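# Illustrative usage sketch (not part of the original module). The base URL,
# query and summarize-by values below are assumptions; the base URL is assumed
# to already include the API version (e.g. ".../v3").
if __name__ == '__main__':
    counts = get_aggregate_event_counts(
        api_url='http://localhost:8080/v3',
        query='["=", "certname", "node.example.com"]',
        summarize_by='containing-class')
    print(counts)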
|
occrp/id-backend
|
api_v3/views/review_exports.py
|
Python
|
mit
| 1,593
| 0
|
from csv import DictWriter
from datetime import datetime
from itertools import chain
from django.db import models
from django.db.models.functions import Concat
from django.http import StreamingHttpResponse
from rest_framework import permissions
from .reviews import ReviewsEndpoint
from .ticket_exports import TicketExportsEndpoint, DummyBuffer
class ReviewExportsEndpoint(ReviewsEndpoint):
permission_classes = (permissions.IsAdminUser, )
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
ticket_url = \
TicketExportsEndpoint.TICKET_URI.format(self.request.get_host())
cols = dict(
Ticket=Concat(
models.Value(ticket_url),
models.F('ticket_id'),
output_field=models.CharField()
),
Date=models.F('created_at'),
Rating=models.F('rating'),
Link=models.F('link'),
Comment=models.F('body')
)
writer = DictWriter(DummyBuffer(), fieldnames=cols.keys())
header_with_rows = chain(
[dict(zip(cols.keys(), cols.keys()))],
queryset.values(**cols)
)
response = StreamingHttpResponse(
streaming_content=(
writer.writerow(row) for row in header_with_rows
),
content_type='text/csv'
)
response['Content-Disposition'] = (
'attachment; filename="reviews-{}.csv"'.format(
datetime.utcnow().strftime('%x')))
return response
|
oleg-alexandrov/pytest
|
testing/test_cache.py
|
Python
|
mit
| 11,823
| 0.000592
|
import sys
import pytest
import os
import shutil
import py
pytest_plugins = "pytester",
class TestNewAPI:
def test_config_cache_makedir(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
with pytest.raises(ValueError):
config.cache.makedir("key/name")
p = config.cache.makedir("name")
assert p.check()
def test_config_cache_dataerror(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
cache = config.cache
pytest.raises(TypeError, lambda: cache.set("key/name", cache))
config.cache.set("key/name", 0)
config.cache._getvaluepath("key/name").write("123invalid")
val = config.cache.get("key/name", -2)
assert val == -2
def test_cache_writefail_cachfile_silent(self, testdir):
testdir.makeini("[pytest]")
testdir.tmpdir.join('.cache').write('gone wrong')
config = testdir.parseconfigure()
cache = config.cache
cache.set('test/broken', [])
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows')
def test_cache_writefail_permissions(self, testdir):
testdir.makeini("[pytest]")
testdir.tmpdir.ensure_dir('.cache').chmod(0)
config = testdir.parseconfigure()
cache = config.cache
cache.set('test/broken', [])
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows')
def test_cache_failure_warns(self, testdir):
testdir.tmpdir.ensure_dir('.cache').chmod(0)
testdir.makepyfile("""
def test_pass():
pass
""")
result = testdir.runpytest('-rw')
assert result.ret == 0
result.stdout.fnmatch_lines([
"*could not create cache path*",
"*1 pytest-warnings*",
])
def test_config_cache(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
# see that we get cache information early on
assert hasattr(config, "cache")
""")
testdir.makepyfile("""
def test_session(pytestconfig):
assert hasattr(pytestconfig, "cache")
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cachefuncarg(self, testdir):
testdir.makepyfile("""
import pytest
def test_cachefuncarg(cache):
val = cache.get("some/thing", None)
assert val is None
cache.set("some/thing", [1])
pytest.raises(TypeError, lambda: cache.get("some/thing"))
val = cache.get("some/thing", [])
assert val == [1]
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cache_reportheader(testdir):
testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"cachedir: .cache"
])
def test_cache_show(testdir):
result = testdir.runpytest("--cache-show")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*cache is empty*"
])
testdir.makeconftest("""
def pytest_configure(config):
config.cache.set("my/name", [1,2,3])
config.cache.set("other/some", {1:2})
dp = config.cache.makedir("mydb")
dp.ensure("hello")
dp.ensure("world")
""")
result = testdir.runpytest()
assert result.ret == 5 # no tests executed
result = testdir.runpytest("--cache-show")
result.stdout.fnmatch_lines_random([
"*cachedir:*",
"-*cache values*-",
"*my/name contains:",
" [1, 2, 3]",
"*other/some contains*",
" {*1*: 2}",
"-*cache directories*-",
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
])
class TestLastFailed:
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
        p.write(py.code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
# Run this again to make sure clear-cache is robust
if os.path.isdir('.cache'):
shutil.rmtree('.cache')
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
def test_failedfirst_order(self, testdir):
testdir.tmpdir.join('test_a.py').write(py.code.Source("""
def test_always_passes():
assert 1
"""))
testdir.tmpdir.join('test_b.py').write(py.code.Source("""
def test_always_fails():
assert 0
"""))
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--lf", "--ff")
        # Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
"test_a.py*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(py.code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(py.code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
testdir.inline_runsource("""
import pytest
|
JackKelly/neuralnilm_prototype
|
scripts/e71.py
|
Python
|
mit
| 2,878
| 0.00139
|
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
* Use dimshuffle not reshape
* 2 dense layers back
* back to default init
* conv between LSTMs.
* More data
* BLSTM
* Try just using a 1D convnet on input
* add second Convnet layer (not sure this is correct thing to do?)
* third conv layer
* large inits
* back to 2 conv layers
e70
* Based on e65
* Using sigmoid instead of rectify in Conv1D layers
e71
* Larger layers
* More data
Results
"""
source = RealApplianceSource(
'/data/dk3810/ukdale.h5',
['fridge freezer', 'hair straighteners', 'television'],
max_input_power=1000, max_appliance_powers=[300, 500, 200],
window=("2013-06-01", "2014-07-01"),
output_one_appliance=False,
boolean_targets=False,
min_on_duration=60,
input_padding=4
)
net = Net(
experiment_name="e71",
source=source,
learning_rate=1e-1,
    save_plot_interval=50,
loss_function=crossentropy,
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': Conv1DLayer,
'num_filters': 50,
'filter_length': 3,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
net.print_net()
net.compile()
net.fit()
|
Dwarfartisan/pyparsec
|
src/parsec/state.py
|
Python
|
mit
| 707
| 0
|
#!/usr/bin/python3
# coding:utf-8
from .error import ParsecEof
class BasicState(object):
def __init__(self, data):
self.data = data
self.index = 0
self.tran = -1
    def next(self):
if 0 <= self.index < len(self.data):
re = self.data[self.index]
self.index += 1
return re
else:
raise ParsecEof(self)
def begin(self):
if self.tran == -1:
self.tran = self.index
return self.index
def commit(self, tran):
if self.tran == tran:
self.tran = -1
def rollback(self, tran):
self.index = tran
if self.tran == tran:
self.tran = -1
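# Illustrative usage sketch (not part of the original module): consume two
# items, then roll back to the position saved by begin().
if __name__ == "__main__":
    state = BasicState("abc")
    tran = state.begin()
    assert state.next() == "a"
    assert state.next() == "b"
    state.rollback(tran)
    assert state.next() == "a"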
|
hgl888/chromium-crosswalk-efl
|
mojo/python/tests/bindings_structs_unittest.py
|
Python
|
bsd-3-clause
| 6,707
| 0.00656
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
# pylint: disable=F0401
import mojo.system
# Generated files
# pylint: disable=F0401
import sample_import_mojom
import sample_import2_mojom
import sample_service_mojom
class StructBindingsTest(unittest.TestCase):
def testModule(self):
self.assertEquals(sample_service_mojom.DefaultsTest.__module__,
'sample_service_mojom')
def testDefaultsTest(self):
defaults_test = sample_service_mojom.DefaultsTest()
self.assertEquals(defaults_test.a0, -12)
self.assertEquals(defaults_test.a1, 12)
self.assertEquals(defaults_test.a2, 1234)
self.assertEquals(defaults_test.a3, 34567)
self.assertEquals(defaults_test.a4, 123456)
self.assertEquals(defaults_test.a5, 3456789012)
self.assertEquals(defaults_test.a6, -111111111111)
self.assertEquals(defaults_test.a7, 9999999999999999999)
self.assertEquals(defaults_test.a8, 0x12345)
self.assertEquals(defaults_test.a9, -0x12345)
self.assertEquals(defaults_test.a10, 1234)
self.assertEquals(defaults_test.a11, True)
self.assertEquals(defaults_test.a12, False)
self.assertEquals(defaults_test.a13, 123.25)
self.assertEquals(defaults_test.a14, 1234567890.123)
self.assertEquals(defaults_test.a15, 1E10)
self.assertEquals(defaults_test.a16, -1.2E+20)
self.assertEquals(defaults_test.a17, 1.23E-20)
self.assertEquals(defaults_test.a18, None)
self.assertEquals(defaults_test.a19, None)
self.assertEquals(defaults_test.a20, sample_service_mojom.Bar.Type.BOTH)
self.assertEquals(defaults_test.a21,
None)
self.assertTrue(isinstance(defaults_test.a22, sample_import2_mojom.Thing))
self.assertEquals(defaults_test.a23, 0xFFFFFFFFFFFFFFFF)
self.assertEquals(defaults_test.a24, 0x123456789)
self.assertEquals(defaults_test.a25, -0x123456789)
self.assertEquals(defaults_test.a26, float('inf'))
self.assertEquals(defaults_test.a27, float('-inf'))
self.assertTrue(math.isnan(defaults_test.a28))
self.assertEquals(defaults_test.a29, float('inf'))
self.assertEquals(defaults_test.a30, float('-inf'))
self.assertTrue(math.isnan(defaults_test.a31))
def testNoAliasing(self):
foo1 = sample_service_mojom.Foo()
foo2 = sample_service_mojom.Foo()
foo1.name = "foo1"
foo2.name = "foo2"
self.assertEquals(foo1.name, "foo1")
self.assertEquals(foo2.name, "foo2")
defaults_test1 = sample_service_mojom.DefaultsTest()
defaults_test2 = sample_service_mojom.DefaultsTest()
self.assertNotEquals(defaults_test1.a22, defaults_test2.a22)
def testImmutableAttributeSet(self):
foo_instance = sample_service_mojom.Foo()
with self.assertRaises(AttributeError):
foo_instance.new_attribute = None
with self.assertRaises(AttributeError):
del foo_instance.name
def _TestIntegerField(self, entity, field_name, bits, signed):
if signed:
min_value = -(1 << (bits - 1))
max_value = (1 << (bits - 1)) - 1
else:
min_value = 0
max_value = (1 << bits) - 1
entity.__setattr__(field_name, min_value)
entity.__setattr__(field_name, max_value)
with self.assertRaises(TypeError):
entity.__setattr__(field_name, None)
with self.assertRaises(OverflowError):
entity.__setattr__(field_name, min_value - 1)
with self.assertRaises(OverflowError):
entity.__setattr__(field_name, max_value + 1)
with self.assertRaises(TypeError):
entity.__setattr__(field_name, 'hello world')
def testTypes(self):
defaults_test = sample_service_mojom.DefaultsTest()
# Integer types
self._TestIntegerField(defaults_test, 'a0', 8, True)
self._TestIntegerField(defaults_test, 'a1', 8, False)
self._TestIntegerField(defaults_test, 'a2', 16, True)
self._TestIntegerField(defaults_test, 'a3', 16, False)
self._TestIntegerField(defaults_test, 'a4', 32, True)
self._TestIntegerField(defaults_test, 'a5', 32, False)
self._TestIntegerField(defaults_test, 'a6', 64, True)
self._TestIntegerField(defaults_test, 'a7', 64, False)
# Boolean types
defaults_test.a11 = False
self.assertEquals(defaults_test.a11, False)
defaults_test.a11 = None
self.assertEquals(defaults_test.a11, False)
defaults_test.a11 = []
self.assertEquals(defaults_test.a11, False)
defaults_test.a12 = True
self.assertEquals(defaults_test.a12, True)
defaults_test.a12 = 1
self.assertEquals(defaults_test.a12, True)
defaults_test.a12 = [[]]
self.assertEquals(defaults_test.a12, True)
# Floating point types
with self.assertRaises(TypeError):
defaults_test.a13 = 'hello'
with self.assertRaises(TypeError):
defaults_test.a14 = 'hello'
# Array type
defaults_test.a18 = None
defaults_test.a18 = []
defaults_test.a18 = [ 0 ]
defaults_test.a18 = [ 255 ]
defaults_test.a18 = [ 0, 255 ]
with self.assertRaises(TypeError):
defaults_test.a18 = [[]]
with self.assertRaises(OverflowError):
defaults_test.a18 = [ -1 ]
with self.assertRaises(OverflowError):
defaults_test.a18 = [ 256 ]
# String type
defaults_test.a19 = None
defaults_test.a19 = ''
defaults_test.a19 = 'hello world'
with self.assertRaises(TypeError):
defaults_test.a19 = [[]]
with self.assertRaises(TypeError):
defaults_test.a19 = [ -1 ]
with self.assertRaises(TypeError):
defaults_test.a19 = [ 256 ]
# Structs
defaults_test.a21 = None
defaults_test.a21 = sample_import_mojom.Point()
with self.assertRaises(TypeError):
defaults_test.a21 = 1
with self.assertRaises(TypeError):
defaults_test.a21 = sample_import2_mojom.Thing()
# Handles
foo_instance = sample_service_mojom.Foo()
foo_instance.source = None
foo_instance.source = mojo.system.Handle()
with self.assertRaises(TypeError):
foo_instance.source = 1
with self.assertRaises(TypeError):
foo_instance.source = object()
def testConstructor(self):
bar_instance = sample_service_mojom.Bar()
foo_instance = sample_service_mojom.Foo(name="Foo",
x=-1,
y=5,
a=False,
bar=bar_instance)
self.assertEquals(foo_instance.name, "Foo")
self.assertEquals(foo_instance.x, -1)
self.assertEquals(foo_instance.y, 5)
self.assertEquals(foo_instance.a, False)
self.assertEquals(foo_instance.bar, bar_instance)
|
imk1/IMKTFBindingCode
|
averageZeroSignalsWithinPeaks.py
|
Python
|
mit
| 2,918
| 0.025703
|
import sys
import argparse
from itertools import izip
import math
def parseArgument():
# Parse the input
parser = argparse.ArgumentParser(description = "Make regions with 0 signal the average of their surrounding regions")
parser.add_argument("--signalsFileName", required=True, help='Signals file')
parser.add_argument("--peakIndexesFileName", required=True, help='Peak indexes file')
parser.add_argument("--outputFileName", required=True, help='Output file, where signals that were 0 will be the average of their surrounding signals')
options = parser.parse_args();
return options
def averageZeroSignalsWithinPeaks(options):
signalsFile = open(options.signalsFileName)
peakIndexesFile = open(options.peakIndexesFileName)
outputFile = open(options.outputFileName, 'w+')
lastSignal = None
lastLastSignal = None
lastPeakIndex = None
lastLastPeakIndex = None
for signalsLine, peakIndexesLine in izip(signalsFile, peakIndexesFile):
# Iterate through the signals and set those that are zero to the average of those of the surrounding regions
signal = float(signalsLine.strip())
peakIndex = int(peakIndexesLine.strip())
if lastSignal == 0:
# The previous signal was a zero, so set it to the average of the surrounding signals
if (peakIndex == lastPeakIndex) and (not math.isnan(lastSignal)):
# Include the current region in the average
if (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
# Include the region before the previous region in the average
if not math.isnan(signal):
# The current signal is not a nan, so include it in the average
lastSignalCorrected = (signal + lastLastSignal)/2.0
outputFile.write(str(lastSignalCorrected) + "\n")
else:
# The current signal is a nan, so use only the previous signal
outputFile.write(str(lastLastSignal) + "\n")
elif not math.isnan(signal):
outputFile.write(str(signal) + "\n")
else:
outputFile.write(str(lastSignal) + "\n")
elif (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
# Set the output to the region before it
outputFile.write(str(lastLastSignal) + "\n")
else:
outputFile.write(str(lastSignal) + "\n")
if signal != 0:
# The current signal is not 0, so record it
outputFile.write(str(signal) + "\n")
lastLastSignal = lastSignal
lastLastPeakIndex = lastPeakIndex
lastSignal = signal
lastPeakIndex = peakIndex
	if lastSignal == 0:
# The final signal was a zero, so set it to the signal before it
if (lastPeakIndex == lastLastPeakIndex) and (not math.isnan(lastLastSignal)):
# Set the output to the region before it
outputFile.write(str(lastLastSignal) + "\n")
else:
outputFile.write(str(lastSignal) + "\n")
signalsFile.close()
peakIndexesFile.close()
outputFile.close()
if __name__=="__main__":
options = parseArgument()
averageZeroSignalsWithinPeaks(options)
|
moskytw/tacit
|
tests/test_correctness.py
|
Python
|
mit
| 884
| 0.003394
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from itertools import izip
from tacit import tac
ordered_list_path = 'data/ordered.list'
expected_lines = open(ordered_list_path).read().splitlines(True)
expected_lines.reverse()
expected_count = len(expected_lines)
for bsize in range(10):
count = 0
for expected_line, line in izip(
expected_lines,
tac(ordered_list_path, bsize)
):
if line != expected_line:
print >> sys.stderr, 'error: bsize=%d, expected_line=%r, line=%r' % (bsize, expected_line, line)
count += 1
if bsize > 0:
if count != expected_count:
print >> sys.stderr, 'error: bsize=%d, expected_count=%r, count=%r' % (bsize, expected_count, count)
else:
if count != 0:
print >> sys.stderr, 'error: bsize=%d, expected_count=0, count=%r' % (bsize, count)
|
|
cheng10/Shanbay_Clone
|
words/forms.py
|
Python
|
gpl-3.0
| 407
| 0
|
from django import forms
from django.contrib.auth.models import User
from models import Learner
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class LearnerForm(forms.ModelForm):
class Meta:
model = Learner
fields = ('vocab_book', 'words_perday')
|
benvanwerkhoven/kernel_tuner
|
kernel_tuner/strategies/pso.py
|
Python
|
apache-2.0
| 4,015
| 0.001743
|
""" The strategy that uses particle swarm optimization"""
from __future__ import print_function
from __future__ import division
import random
import numpy as np
from kernel_tuner.strategies.minimize import _cost_func, get_bounds_x0_eps
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
#scale variables in x because PSO works with velocities to visit different configurations
tuning_options["scaling"] = True
#using this instead of get_bounds because scaling is used
bounds, _, _ = get_bounds_x0_eps(tuning_options)
args = (kernel_options, tuning_options, runner, results)
num_particles = tuning_options.strategy_options.get("popsize", 20)
maxiter = tuning_options.strategy_options.get("maxiter", 100)
w = tuning_options.strategy_options.get("w", 0.5) # inertia constant
c1 = tuning_options.strategy_options.get("c1", 2.0) # cognitive constant
c2 = tuning_options.strategy_options.get("c2", 1.0) # social constant
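    # These defaults map onto the user-facing strategy_options dict, e.g.
    # (illustrative): strategy="pso", strategy_options={"popsize": 30,
    # "maxiter": 50, "w": 0.7} when calling kernel_tuner's tune_kernel.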
best_time_global = 1e20
best_position_global = []
# init particle swarm
swarm = []
for i in range(0, num_particles):
swarm.append(Particle(bounds, args))
for i in range(maxiter):
if tuning_options.verbose:
print("start iteration ", i, "best time global", best_time_global)
# evaluate particle positions
for j in range(num_particles):
swarm[j].evaluate(_cost_func)
# update global best if needed
if swarm[j].time <= best_time_global:
best_position_global = swarm[j].position
best_time_global = swarm[j].time
# update particle velocities and positions
for j in range(0, num_particles):
swarm[j].update_velocity(best_position_global, w, c1, c2)
swarm[j].update_position(bounds)
if tuning_options.verbose:
print('Final result:')
print(best_position_global)
print(best_time_global)
return results, runner.dev.get_environment()
class Particle:
def __init__(self, bounds, args):
        self.ndim = len(bounds)
self.args = args
self.velocity = np.random.uniform(-1, 1, self.ndim)
self.position = np.random.uniform([b[0] for b in bounds], [b[1] for b in bounds])
self.best_pos = self.position
        self.best_time = 1e20
self.time = 1e20
def evaluate(self, cost_func):
self.time = cost_func(self.position, *self.args)
# update best_pos if needed
if self.time < self.best_time:
self.best_pos = self.position
self.best_time = self.time
def update_velocity(self, best_position_global, w, c1, c2):
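        # Canonical PSO update: v <- w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x),
        # with r1 and r2 drawn uniformly from [0, 1).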
r1 = random.random()
r2 = random.random()
vc = c1 * r1 * (self.best_pos - self.position)
vs = c2 * r2 * (best_position_global - self.position)
self.velocity = w * self.velocity + vc + vs
def update_position(self, bounds):
self.position = self.position + self.velocity
self.position = np.minimum(self.position, [b[1] for b in bounds])
self.position = np.maximum(self.position, [b[0] for b in bounds])
|
andresfcardenas/marketing-platform
|
landing/forms.py
|
Python
|
bsd-3-clause
| 803
| 0
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from landing.models import LandingRegister
class LandingForm(forms.ModelForm):
class Meta:
model = LandingRegister
fields = (
'name',
'email',
)
def __init__(self, *args, **kwargs):
super(LandingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.fields['name'].widget.attrs['placeholder'] = 'Name'
        self.fields['email'].widget.attrs['placeholder'] = 'Email'
self.helper.add_input(
Submit(
'submit',
'Solicitar Información',
css_class='button'
),
)
|
dreadrel/UWF_2014_spring_COP3990C-2507
|
notebooks/scripts/book_code/code/decorator5.py
|
Python
|
apache-2.0
| 750
| 0.005333
|
def tracer(func): # State via enclosing scope and func attr
def wrapper(*args, **kwargs): # calls is per-function, not global
wrapper.calls += 1
print('call %s to %s' % (wrapper.calls, func.__name__))
return func(*args, **kwargs)
wrapper.calls = 0
    return wrapper
@tracer
def spam(a, b, c): # Same as: spam = tracer(spam)
print(a + b + c)
@tracer
def eggs(x, y): # Same as: eggs = tracer(eggs)
print(x ** y)
spam(1, 2, 3) # Really calls wrapper, assigned to spam
spam(a=4, b=5, c=6) # wrapper calls spam
eggs(2, 16) # Really calls wrapper, assigned to eggs
eggs(4, y=4) # wrapper.calls _is_ per-decoration here
|
Vishakha1990/Lambdas
|
awsLambda_ours/nearbyfriends/findnearbypeoplelocal.py
|
Python
|
apache-2.0
| 1,712
| 0.003505
|
from __future__ import print_function
import time
import uuid
import sys
import socket
import logging
import gpxpy.geo
import redis
from operator import itemgetter
r = redis.Redis(
    host='datanode-001.zumykb.0001.use2.cache.amazonaws.com',
port=6379)
def get_distance(lat, querylat, querylong):
splitloc = lat
friendts = splitloc[0]
friendlat = float(splitloc[1])
friendlog = float(splitloc[2])
return gpxpy.geo.haversine_distance(friendlat, friendlog, querylat, querylong)
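# --- Editor's note (illustrative; not part of the original script) ---
# get_distance() above actually receives a (timestamp, latitude, longitude) tuple,
# produced by splitting a Redis set member on commas, despite the parameter name "lat".
# A hypothetical member such as "1488323200,40.7128,-74.0060" would be passed in as
# ('1488323200', '40.7128', '-74.0060') and compared against the query coordinates,
# with gpxpy returning the haversine distance between the two points.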
def handler():
"""
    This function puts values into the cache and gets them from it.
    The cache is hosted using ElastiCache.
"""
# r = redis.Redis(
# host='datanode-001.zumykb.0001.use2.cache.amazonaws.com',
# port=6379)
logging.info('created redis client')
queryuserid = sys.argv[1]
querylat = float(sys.argv[2])
querylong = float(sys.argv[3])
peoplelist = r.keys('userdata*')
print("People List size : ", len(peoplelist))
dist = []
i=0
for friend in peoplelist:
i=i+1
if(i%100)==0:
print(i)
# calculating distance for each friend
allLocations = r.smembers(friend)
tupleList = []
for location in allLocations:
tupleList.append(tuple(location.split(',')))
if (len(tupleList) > 0):
lat_location = max(tupleList, key=itemgetter(0))
dist.append(get_distance(lat_location, querylat, querylong))
if (len(dist) > 0):
minDistance = min(dist)
print(minDistance)
else:
print("No friends found")
return "Fetched value from memcache: "
def main(argv):
handler()
if __name__ == "__main__":
main(sys.argv)
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/test/functional/undefined_loop_variable.py
|
Python
|
apache-2.0
| 733
| 0.006821
|
# pylint: disable=missing-docstring
def do_stuff(some_random_list):
for var in some_random_list:
pass
    return var # [undefined-loop-variable]
def do_else(some_random_list):
for var in some_random_list:
if var == 42:
break
else:
var = 84
return var
__revision__ = 'yo'
TEST_LC = [C for C in __revision__ if C.isalpha()]
B = [B for B in __revision__ if B.isalpha()]
VAR2 = B # nor this one
for var1, var2 in TEST_LC:
var1 = var2 + 4
VAR3 = var1 # [undefined-loop-variable]
for note in __revision__:
note.something()
for line in __revision__:
for note in line:
        A = note.anotherthing()
for x in []:
pass
for x in range(3):
VAR5 = (lambda: x)()
|
glujan/lapka
|
tests/unit/test_fetch.py
|
Python
|
mit
| 9,020
| 0.002443
|
import unittest
from pathlib import Path
from unittest.mock import patch
from urllib.parse import urljoin, urlparse
from aiohttp import ClientSession
from lxml.etree import XPath, XPathSyntaxError
from tests.utils import AsyncMeta, fake_response as f_resp
from lapka import fetch
class TestShelter(unittest.TestCase, metaclass=AsyncMeta):
@classmethod
def setUpClass(cls):
cls.animals_list = """
<a class='animal' href="/animal01">Animal 01</a>
<a class="animal" href="http://example.com/animal02">Animal 02</a>
<a class="next" href="http://example.com/?p=2">Next</a>
"""
def setUp(self):
self.shelter = fetch.Shelter()
async def test_parse(self):
urls = ['1', '2', '3']
async def dummy_urls(*args):
for u in urls:
yield u
mock_parse = patch.object(self.shelter, '_parse', return_value={}).start()
mock_urls = patch.object(self.shelter, '_animals_urls', side_effect=dummy_urls).start()
with patch.object(
ClientSession, 'get', return_value=f_resp(self.animals_list)
) as mock_get:
async with ClientSession() as session:
resp = await self.shelter.parse(session)
self.assertIsInstance(resp, list)
self.assertEqual(len(resp), len(urls))
self.assertEqual(mock_get.call_count, len(urls))
self.assertEqual(mock_parse.call_count, len(urls))
for r in resp:
self.assertIn(r['url'], urls)
mock_urls.assert_called_once()
mock_parse.stop()
mock_urls.stop()
async def test_parse_not_overwrite_session(self):
async def dummy_urls(*args):
for u in []:
yield u
mock_parse = patch.object(self.shelter, '_parse', return_value={}).start()
mock_urls = patch.object(self.shelter, '_animals_urls', side_effect=dummy_urls).start()
with patch.object(ClientSession, 'get', return_value=f_resp(self.animals_list)):
async with ClientSession() as session:
self.shelter.session = session
await self.shelter.parse(None)
self.assertIs(self.shelter.session, session)
mock_parse.stop()
mock_urls.stop()
async def test__animals_urls(self):
animals = ["http://example.com/animal01", "http://example.com/animal02"] * 2
urls = []
base = 'http://example.com'
orig_start_url = fetch.Shelter.start_url
fetch.Shelter.start_url = base
self.shelter = fetch.Shelter()
self.shelter.animal_url = "//a[@class='animal']/@href"
self.shelter.next_url = "//a[@class='next']/@href"
self.assertEqual(self.shelter.start_url, base)
with patch.object(
ClientSession, 'get', return_value=f_resp(self.animals_list)
) as mock_get:
async with ClientSession() as session:
self.shelter.session = session
urls = [url async for url in self.shelter._animals_urls()]
self.assertListEqual(urls, animals)
self.assertEqual(mock_get.call_count, 2)
mock_get.assert_any_call(self.shelter.start_url)
mock_get.assert_any_call("http://example.com/?p=2")
fetch.Shelter.start_url = orig_start_url
async def test__animals_urls_invalid_html(self):
urls = []
self.shelter.animal_url = "//a[@class='animal']/@href"
self.shelter.next_url = "//a[@class='next']/@href"
self.shelter.start_url = 'http://example.com'
with patch.object(ClientSession, 'get', return_value=f_resp('Invalid')) as mock_get:
async with ClientSession() as session:
self.shelter.session = session
urls = [url async for url in self.shelter._animals_urls()]
mock_get.assert_called_once_with(self.shelter.start_url)
self.assertListEqual([], urls)
class TestConcreteShelter:
shelter_class = None
animals_urls = {"animals": [], "next_page": ""}
def setUp(self):
self.shelter = self.shelter_class()
def test_class_attributes(self):
try:
XPath(self.shelter.animal_url)
XPath(self.shelter.next_url)
except XPathSyntaxError as e:
self.fail(e.msg)
url = urlparse(self.shelter.start_url)
self.assertIn(url.scheme, ('http', 'https'))
self.assertTrue(url.netloc)
def test__parse_invalid_html(self):
with self.subTest("Empty html"):
data = self.shelter._parse("")
self.assertDictEqual(data, {})
with self.subTest("Invalid HTML"):
data = self.shelter._parse("<html><p>Invalid</p></html>")
self.assertDictEqual(data, {})
async def test__animals_urls(self):
animals = self.animals_urls["animals"] * 2
urls = []
with patch.object(
ClientSession, 'get', return_value=f_resp(self.animals_list)
) as mock_get:
async with ClientSession() as session:
self.shelter.session = session
urls = [url async for url in self.shelter._animals_urls()]
self.assertListEqual(urls, animals)
self.assertEqual(mock_get.call_count, 2)
mock_get.assert_any_call(self.shelter.start_url)
mock_get.assert_any_call(self.animals_urls["next_page"])
class TestSchroniskoWroclawPl(TestConcreteShelter, unittest.TestCase, metaclass=AsyncMeta):
shelter_class = fetch.SchroniskoWroclawPl
animals_urls = {
"animals": [
"http://schroniskowroclaw.pl/displaywp_project/burbon-22117/",
"http://schroniskowroclaw.pl/displaywp_project/nelson-10117/",
],
"next_page": "http://schroniskowroclaw.pl/zwierzeta-do-adopcji/?page=2",
}
@classmethod
def setUpClass(cls):
fp = Path(__file__).parent / 'assets' / 'animal_01.html'
with open(fp, 'r') as f:
cls.animal = f.read()
fp = Path(__file__).parent / 'assets' / 'animals_list_01.html'
with open(fp, 'r') as f:
cls.animals_list = f.read()
def test__full_url(self):
for url in ("/partial-url", "other-relative", "/another?p=1", "http://example.org/remote"):
with self.subTest(url=url):
full = self.shelter._full_url(url)
base = '{url.scheme}://{url.netloc}/'.format(url=urlparse(self.shelter.start_url))
valid_full = urljoin(base, url)
self.assertEqual(valid_full, full)
def test__parse(self):
valid_data = {
'name': 'Nelson',
'id': '101/17',
'photos': [
'http://schroniskowroclaw.pl/wp-content/uploads/2017/02/DSCF9115.jpg',
'http://schroniskowroclaw.pl/wp-content/uploads/2017/02/DSCF9120.jpg',
],
'since': '18.02.2017',
'category': 'Koty', # TODO i18n
'description': [
'Nelson ma 4-lata, został oddany. Duży, gruby, piękny kocur. Wykastrowany.',
'Będzie do adopcji od: 4.03.2017',
],
}
data = self.shelter._parse(self.animal)
        self.assertDictEqual(data, valid_data)
class TestNaPaluchuWawPl(TestConcreteShelter, unittest.TestCase, metaclass=AsyncMeta):
shelter_class = fetch.NaPaluchuWawPl
animals_urls = {
"animals": [
"http://www.napaluchu.waw.pl/czekam_na_ciebie/wszystkie_zwierzeta_do_adopcji/011100429",
"http://www.napaluchu.waw.pl/czekam_na_ciebie/wszystkie_zwierzeta_do_adopcji/000801535",
],
"next_page": "http://www
|
.napaluchu.waw.pl/czekam_na_ciebie/wszystkie_zwierzeta_do_adopcji:2",
}
@classmethod
def setUpClass(cls):
fp = Path(__file__).parent / 'assets' / 'animal_11.html'
with open(fp, 'r') as f:
cls.animal = f.read()
fp = Path(__file__).parent / 'assets' / 'animals_list_11.html'
with open(fp, 'r') as f:
cls.animals_list = f.read()
def test__parse(self):
vali
|
Robobench/rapman-subuser
|
logic/subuserCommands/repair.py
|
Python
|
lgpl-3.0
| 1,044
| 0.025862
|
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
import pathConfig
#external imports
import optparse,sys
#internal imports
import subuserlib.classes.user,subuserlib.verify,subuserlib.commandLineArguments
####################################################
def parseCliArgs(realArgs):
usage = "usage: subuser %pr
|
og [options]"
description = """
Repair your subuser installation.
This is useful when migrating from one machine to another. You can copy your ~/.subuser folder to the new machine and run repair, and things should just work.
"""
parser = optparse.OptionParser(usage=usage,description=description,formatter=subuserlib.commandLineArguments.HelpFormatterThatDoesntReformatDescription())
  return parser.parse_args(args=realArgs)
def verify(realArgs):
options,arguments=parseCliArgs(realArgs)
user = subuserlib.classes.user.User()
subuserlib.verify.verify(user)
user.getRegistry().commit()
if __name__ == "__main__":
verify(sys.argv[1:])
|
Gjum/SpockBot
|
spockbot/mcdata/recipes.py
|
Python
|
mit
| 2,639
| 0
|
from collections import defaultdict, namedtuple
from minecraft_data.v1_8 import recipes as raw_recipes
RecipeItem = namedtuple('RecipeItem', 'id meta amount')
class Recipe(object):
def __init__(self, raw):
self.result = reformat_item(raw['result'], None)
if 'ingredients' in raw:
self.ingredients = [reformat_item(item, 0)
for item in raw['ingredients']]
self.in_shape = None
self.out_shape = None
else:
self.in_shape = reformat_shape(raw['inShape'])
self.out_shape = reformat_shape(raw['outShape']) \
if 'outShape' in raw else None
self.ingredients = [item for row in self.in_shape for item in row]
@property
def total_ingredient_amounts(self):
"""
Returns:
dict: In the form { (item_id, metadata) -> amount }
"""
totals = defaultdict(int)
for id, meta, amount in self.ingredients:
totals[(id, meta)] += amount
return totals
@property
def ingredient_positions(self):
"""
Returns:
dict: In the form { (item_id, metadata) -> [(x, y, amount), ...] }
"""
positions = defaultdict(list)
for y, row in enumerate(self.in_shape):
for x, (item_id, metadata, amount) in enumerate(row):
positions[(item_id, metadata)].append((x, y, amount))
return positions
def reformat_item(raw, default_meta=None):
if isinstance(raw, dict):
raw = raw.copy() # do not modify arg
if 'metadata' not in raw:
            raw['metadata'] = default_meta
if 'count' not in raw:
raw['count'] = 1
        return RecipeItem(raw['id'], raw['metadata'], raw['count'])
elif isinstance(raw, list):
return RecipeItem(raw[0], raw[1], 1)
else: # single ID or None
return RecipeItem(raw or None, default_meta, 1)
def reformat_shape(shape):
return [[reformat_item(item, None) for item in row] for row in shape]
def iter_recipes(item_id, meta=None):
item_id = str(item_id)
meta = meta and int(meta)
try:
recipes_for_item = raw_recipes[item_id]
except KeyError:
return # no recipe found, do not yield anything
else:
for raw in recipes_for_item:
recipe = Recipe(raw)
if meta is None or meta == recipe.result.meta:
yield recipe
def get_any_recipe(item, meta=None):
# TODO return small recipes if present
for matching in iter_recipes(item, meta):
return matching
return None
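# --- Editor's illustrative sketch; not part of the original SpockBot module ---
# Hypothetical use of the helpers above: fetch any recipe for an item id and read the
# aggregated ingredient amounts. The item id 280 (a stick in vanilla 1.8) is only an
# example value and does not come from this file.
def _recipe_demo(item_id=280):
    recipe = get_any_recipe(item_id)
    if recipe is None:
        return {}
    # {(item_id, metadata): amount, ...} as documented on total_ingredient_amounts
    return recipe.total_ingredient_amounts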
|
sahat/bokeh
|
sphinx/source/tutorial/exercises/boxplot.py
|
Python
|
bsd-3-clause
| 2,199
| 0.003638
|
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Generate some synthetic time series for six different categories
cats = list("abcdef")
y = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
y[g == l] += i // 2
df = pd.DataFrame(dict(score=y, group=g))
# Find the quartiles, IQR, and outliers for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q2 + 1.5*iqr
lower = q2 - 1.5*iqr
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting: we need an x (categorical) and a y (numeric)
# coordinate for every outlier.
outx = []
outy = []
for cat in cats:
for value in out[cat]:
outx.append(cat)
outy.append(value)
# EXERCISE: output static HTML file
# EXERCISE: turn on plot hold
# Draw the upper segment extending from the box plot using `segment` which
# takes x0, x1 and y0, y1 as data
segment(cats, upper.score, cats, q3.score, x_range=cats, line_width=2,
tools="", background_fill="#EFE8E2", line_color="black", title="")
# EXERCISE: draw the lower segment
# Draw the upper box of the box plot using `rect`
rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
# EXERCISE: use `rect` to draw the bottom box with a different color
# OK here we use `rect` to draw the whiskers. It's slightly cheating, but it's
# easier than using segments or lines, since we can specify widths simply with
# categorical percentage units
rect(cats, lower.score, 0.2, 0, line_color="black")
rect(cats, upper.score, 0.2, 0, line_color="black")
# EXERCISE: use `circle` to draw the outliers
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the X grid lines, change the Y grid line color
# - make the tick labels bigger
xgrid().grid_line_color = None
ygrid().grid_line_color = "white"
ygrid().grid_line_width = 2
xaxis().major_label_text_font_size="12pt"
show()
|
mahak/neutron
|
neutron/extensions/l3_ext_ndp_proxy.py
|
Python
|
apache-2.0
| 817
| 0
|
# Copyright 2022 Troila
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import l3_ext_ndp_proxy as apidef
from neutron_lib.api import extensions
class L3_ext_ndp_proxy(extensions.APIExtensionDescriptor):
api_definition = apidef
|
jrtomps/root
|
main/python/cmdLineUtils.py
|
Python
|
lgpl-2.1
| 47,974
| 0.01357
|
#!/usr/bin/env @python@
# ROOT command line tools module: cmdLineUtils
# Author: Julien Ripoche
# Mail: julien.ripoche@u-psud.fr
# Date: 20/08/15
"""Contain utils for ROOT command line tools"""
##########
# Stream redirect functions
# The original code of these functions can be found here:
# http://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
# Thanks J.F. Sebastian !!
from contextlib import contextmanager
import os
import sys
def fileno(file_or_fd):
"""
Look for 'fileno' attribute.
"""
fd = getattr(file_or_fd, 'fileno', lambda: file_or_fd)()
if not isinstance(fd, int):
raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
return fd
@contextmanager
def streamRedirected(source=sys.stdout, destination=os.devnull):
"""
Redirect the output from source to destination.
"""
stdout_fd = fileno(source)
# copy stdout_fd before it is overwritten
#NOTE: `copied` is inheritable on Windows when duplicating a standard stream
with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
source.flush() # flush library buffers that dup2 knows nothing about
try:
os.dup2(fileno(destination), stdout_fd) # $ exec >&destination
except ValueError: # filename
with open(destination, 'wb') as destination_file:
os.dup2(destination_file.fileno(), stdout_fd) # $ exec > destination
try:
yield source # allow code to be run with the redirected stream
finally:
# restore source to its previous value
#NOTE: dup2 makes stdout_fd inheritable unconditionally
source.flush()
os.dup2(copied.fileno(), stdout_fd) # $ exec >&copied
def stdoutRedirected():
"""
Redirect the output from sys.stdout to os.devnull.
"""
return streamRedirected(sys.stdout, os.devnull)
def stderrRedirected():
"""
Redirect the output from sys.stderr to os.devnull.
"""
return streamRedirected(sys.stderr, os.devnull)
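# --- Editor's illustrative sketch; not part of the original ROOT module ---
# Hypothetical use of the redirection helpers above: anything written to stdout inside
# the context goes to os.devnull, and normal output resumes once the context exits.
def _redirection_demo():
    with stdoutRedirected():
        sys.stdout.write("swallowed by os.devnull\n")
    sys.stdout.write("visible again\n")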
# The end of streamRedirected functions
##########
##########
# Imports
##
# redirect output (escape characters during ROOT importation...)
# The gymnastics with sys.argv are necessary to work around ROOT-7577
argvTmp = sys.argv[:]
sys.argv = []
with stdoutRedirected():
import ROOT
ROOT.gROOT.GetVersion()
sys.argv = argvTmp
import argparse
import glob
import fnmatch
import logging
LOG_FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=LOG_FORMAT)
# The end of imports
##########
##########
# Different functions to get a parser of arguments and options
def _getParser(theHelp, theEpilog):
"""
Get a commandline parser with the defaults of the commandline utils.
"""
return argparse.ArgumentParser(description=theHelp,
formatter_class=argparse.RawDescriptionHelpFormatter,
                                   epilog = theEpilog)
def getParserSingleFile(theHelp, theEpilog=""):
"""
    Get a commandline parser with the defaults of the commandline utils and a
source file or not.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='?', help="Input file")
return parser
def getParserFile(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils and a
list of source files.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("FILE", nargs='+', help="Input file")
return parser
def getParserSourceDest(theHelp, theEpilog=""):
"""
Get a commandline parser with the defaults of the commandline utils,
a list of source files and a destination file.
"""
parser = _getParser(theHelp, theEpilog)
parser.add_argument("SOURCE", nargs='+', help="Source file")
parser.add_argument("DEST", help="Destination file")
return parser
# The end of get parser functions
##########
##########
# Several utils
@contextmanager
def _setIgnoreLevel(level):
originalLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = level
yield
ROOT.gErrorIgnoreLevel = originalLevel
def changeDirectory(rootFile,pathSplit):
"""
    Change the current directory (ROOT.gDirectory) to the one corresponding to (rootFile,pathSplit)
"""
rootFile.cd()
for directoryName in pathSplit:
theDir = ROOT.gDirectory.Get(directoryName)
if not theDir:
logging.warning("Directory %s does not exist." %directoryName)
return 1
else:
theDir.cd()
return 0
def createDirectory(rootFile,pathSplit):
"""
Add a directory named 'pathSplit[-1]' in (rootFile,pathSplit[:-1])
"""
retcode = changeDirectory(rootFile,pathSplit[:-1])
if retcode == 0: ROOT.gDirectory.mkdir(pathSplit[-1])
return retcode
def getFromDirectory(objName):
"""
Get the object objName from the current directory
"""
return ROOT.gDirectory.Get(objName)
def isExisting(rootFile,pathSplit):
"""
    Return True if the object, corresponding to (rootFile,pathSplit), exists
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetListOfKeys().Contains(pathSplit[-1])
def isDirectoryKey(key):
"""
Return True if the object, corresponding to the key, inherits from TDirectory
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TDirectory.Class())
def isTreeKey(key):
"""
Return True if the object, corresponding to the key, inherits from TTree
"""
classname = key.GetClassName()
cl = ROOT.gROOT.GetClass(classname)
return cl.InheritsFrom(ROOT.TTree.Class())
def getKey(rootFile,pathSplit):
"""
Get the key of the corresponding object (rootFile,pathSplit)
"""
changeDirectory(rootFile,pathSplit[:-1])
return ROOT.gDirectory.GetKey(pathSplit[-1])
def isDirectory(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TDirectory
"""
if pathSplit == []: return True # the object is the rootFile itself
else: return isDirectoryKey(getKey(rootFile,pathSplit))
def isTree(rootFile,pathSplit):
"""
Return True if the object, corresponding to (rootFile,pathSplit), inherits from TTree
"""
if pathSplit == []: return False # the object is the rootFile itself
else: return isTreeKey(getKey(rootFile,pathSplit))
def getKeyList(rootFile,pathSplit):
"""
Get the list of keys of the directory (rootFile,pathSplit),
if (rootFile,pathSplit) is not a directory then get the key in a list
"""
if isDirectory(rootFile,pathSplit):
changeDirectory(rootFile,pathSplit)
return ROOT.gDirectory.GetListOfKeys()
else: return [getKey(rootFile,pathSplit)]
def keyListSort(keyList):
"""
Sort list of keys by their names ignoring the case
"""
keyList.sort(key=lambda x: x.GetName().lower())
def tupleListSort(tupleList):
"""
Sort list of tuples by their first elements ignoring the case
"""
tupleList.sort(key=lambda x: x[0].lower())
def dirListSort(dirList):
"""
Sort list of directories by their names ignoring the case
"""
dirList.sort(key=lambda x: [n.lower() for n in x])
def keyClassSpliter(rootFile,pathSplitList):
"""
Return a list of directories and a list of keys corresponding
    to the other objects, for rootLs and rootPrint use
"""
keyList = []
dirList = []
for pathSplit in pathSplitList:
if pathSplit == []: dirList.append(pathSplit)
elif isDirectory(rootFile,pathSplit): dirList.append(pathSplit)
else: keyList.append(getKey(rootFile,pathSplit))
keyListSort(keyList)
dirListSort(dirList)
return keyList,dirList
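# --- Editor's illustrative sketch; not part of the original ROOT module ---
# Hypothetical use of the helpers above (the file name and paths are invented): open a
# file with openROOTFile (defined just below), split the requested paths into plain
# keys and directories, then log the contents of each directory.
def _key_listing_demo():
    rootFile = openROOTFile("hypothetical.root")
    if not rootFile:
        return
    keyList, dirList = keyClassSpliter(rootFile, [["histograms"], ["tree1"]])
    for pathSplit in dirList:
        for key in getKeyList(rootFile, pathSplit):
            logging.info("directory entry: %s", key.GetName())
    for key in keyList:
        logging.info("object: %s", key.GetName())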
def openROOTFile(fileName, mode="read"):
"""
Open the ROOT file corresponding to fileName in the corresponding mode,
    redirecting the output so that missing dictionaries are not reported
"""
#with stderrRedirected():
with _setIgnoreLevel(ROOT.kError):
theFile = ROOT.TFile.Open(fileName, mode)
if not theFile:
logging.warning(
|
Yukarumya/Yukarum-Redfoxes
|
testing/mozharness/configs/single_locale/tc_linux32.py
|
Python
|
mpl-2.0
| 927
| 0.001079
|
import os
EN_US_BINARY_URL = "%(en_us_
|
binary_url)s"
config = {
"locales_file": "src/browser/locales/all-locales",
"tools_repo": "https://hg.mozilla.org/build/tools",
"mozconfig": "src/browser/config/mozconfigs/linux32/l10n-mozconfig",
"bootstrap_env": {
"NO_MERCURIAL_SETUP_CHECK": "1",
"MOZ_OBJDIR": "obj-l10n",
"EN_US_BINARY_URL": os.environ.get("EN_US_BINARY_URL", EN_US_BINARY_URL),
"LOCALE_MERGEDIR": "%(abs_merge_dir)s/",
"MOZ_UPDATE_CHANNEL": "%(update_channel)s",
"DIST": "%(abs_objdir)s",
"LOCALE_MERGEDIR": "%(abs_merge_dir)s/",
"L10NBASEDIR": "../../l10n",
"MOZ_MAKE_COMPLETE_MAR": "1",
'TOOLTOOL_CACHE': os.environ.get('TOOLTOOL_CACHE'),
},
"upload_env": {
'UPLOAD_HOST': 'localhost',
'UPLOAD_PATH': '/home/worker/artifacts/',
},
"mozilla_dir": "src/",
"simple_name_move": True,
}
|
Kirkkonen/NetForSpeech
|
nfsmain/models.py
|
Python
|
apache-2.0
| 3,681
| 0.001642
|
from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
from datetime import date
# Create your models here.
class ManagedEntity():
# submitted_by = models.ForeignKey()
submitted_at = models.DateField(auto_now_add=True)
class Organisation(models.Model):
name = models.CharField(max_length=512)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('main:organisation_detail', kwargs={'pk': self.pk})
class Media(models.Model):
name = models.CharField(max_length=512, default='[ИМЯ НЕ ПРИСВОЕНО]')
home_url = models.CharField(max_length=512, blank=True)
state = models.CharField(choices=[('Y', 'Да'), ('N', 'Нет')], max_length=1, default='N')
def __str__(self):
return self.name
class Event(models.Model):
name = models.CharField(max_length=512)
class Communication(models.Model):
pass
class CommunicationMixIn():
    # FIXME Makemigrations throws an error when moving O2OField comm to the mixin
def save(self, *args, **kwargs):
if not self.comm_id:
comm = Communication()
comm.save()
self.comm = comm
return super().save(*args, **kwargs)
class Interview(CommunicationMixIn, models.Model):
comm = models.OneToOneField(Communication, primary_key=True, blank=True)
origin = models.ForeignKey(Media)
class Speech(CommunicationMixIn, models.Model):
comm = models.OneToOneField(Communication, primary_key=True, blank=True)
origin = models.ForeignKey(Event)
class Speaker(models.Model):
index_name = models.CharField(max_length=256)
secondary_names = models.CharField(max_length=256)
other_names = models.CharField(max_length=256, blank=True)
birth_date = models.DateField(blank=True, null=True)
    current_work = models.ForeignKey(Organisation, blank=True, related_name='employee_current_set')
previous_work = models.ManyToManyField(Organisation, blank=True, related_name='employee_former_set')
def __str__(self):
return ' '.join([self.index_name, self.secondary_names, self.other_names])
def get_absolute_url(self):
return reverse('main:speaker_detail', kwargs={'pk': self.pk})
class ThemeTag(models.Model):
caption = models.CharField(max_length=128)
class Record(models.Model):
text = models.TextField()
datestamp = models.DateField(default=timezone.now)
timestamp = models.TimeField(blank=True, null=True)
source_url = models.CharField(max_length=2048)
media = models.ForeignKey(Media, blank=True)
class Meta:
abstract = True
def save(self, *args, **kwargs):
medias = Media.objects.all()
for media in medias:
if media.home_url in self.source_url:
break
else:
media = Media(home_url=self.source_url)
media.save()
self.media = media
super(Record, self).save(*args, **kwargs)
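# --- Editor's note (illustrative; not part of the original app) ---
# Record.save() above reuses the first existing Media whose home_url is a substring of
# source_url: with a hypothetical Media(home_url='http://example.org') already stored,
# saving a record with source_url='http://example.org/news/1' attaches that Media,
# while an unmatched URL creates a new Media row whose home_url is the full source_url.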
class Fact(Record):
def __str__(self):
return '«{}...» от {}'.format(self.text[:50], self.media)
def get_absolute_url(self):
return reverse('main:fact_detail', kwargs={'pk': self.pk})
class Statement(Record):
theme_tag = models.CharField(max_length=256, blank=True)
speaker = models.ForeignKey(Speaker)
communication = models.CharField(max_length=256, blank=True)
# communication = models.ForeignKey(Communication)
def __str__(self):
return '«{}...» от {}'.format(self.text[:50], self.speaker)
def get_absolute_url(self):
return reverse('main:statement_detail', kwargs={'pk': self.pk})
|
rabipanda/tensorflow
|
tensorflow/python/debug/lib/debug_gradients.py
|
Python
|
apache-2.0
| 15,460
| 0.004851
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger: Tools for debugging gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import uuid
import six
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
_GRADIENT_DEBUG_TAG = "gradient_debug_"
_gradient_debuggers = {}
def _tensor_to_grad_debug_op_name(tensor, grad_debugger_uuid):
op_name, slot = debug_graphs.parse_node_or_tensor_name(tensor.name)
return "%s_%d/%s%s" % (op_name, slot, _GRADIENT_DEBUG_TAG, grad_debugger_uuid)
def _parse_grad_debug_op_name(op_name):
"""Parse the name of a debug gradient op.
Args:
op_name: the name of the debug gradient op.
Returns:
1) The UUID of the GradientsDebugger that created the debug gradient op.
2) Name of the original tensor whose gradient is debugged by the debug
gradient op.
"""
name_items = op_name.split("/")
assert len(name_items) > 1
assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)
grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]
if "_" in grad_debugger_uuid:
grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index("_")]
orig_tensor_slot = int(name_items[-2][name_items[-2].rfind("_") + 1:])
orig_base_op_name = name_items[-2][:name_items[-2].rfind("_")]
orig_tensor_name = ("/".join(name_items[:-2] + [orig_base_op_name]) +
":%d" % orig_tensor_slot)
return grad_debugger_uuid, orig_tensor_name
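# --- Editor's illustrative note; not part of the original TensorFlow module ---
# Example of the naming scheme handled above, with a made-up UUID: for the tensor
# "dense/MatMul:0", _tensor_to_grad_debug_op_name() builds the debug op name
# "dense/MatMul_0/gradient_debug_0123abcd", and _parse_grad_debug_op_name() parses
# that name back into the pair ("0123abcd", "dense/MatMul:0").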
class GradientsDebugger(object):
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
differentiation algorithm, i.e., @{tf.gradients} and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
def __init__(self, y_tensor=None):
"""Constructor of GradientsDebugger.
Args:
y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor
on the numerator of the differentiation.
"""
self._uuid = uuid.uuid4().hex
_gradient_debuggers[self._uuid] = self
# A dict mapping x-tensor names to gradient tensor. x-tensor refers to the
# independent tf.Tensor, i.e., the tensor on the denominator of the
# differentiation.
self._gradient_tensors = {}
self._y_tensor = y_tensor
self._graph = None
if y_tensor:
self._graph = y_tensor.graph
self._is_active_context = False
@property
def y_tensor(self):
return self._y_tensor
@property
def graph(self):
return self._graph
def __enter__(self):
self._is_active_context = True
def __exit__(self, unused_type, unused_value, unused_traceback):
self._is_active_context = False
def identify_gradient(self, input_tensor):
"""Create a debug identity tensor that registers and forwards gradients.
The side effect of this method is that when gradient tensor(s) are created
    with respect to any paths that include the `input_tensor`, the gradient
    tensor(s) with respect to `input_tensor` will be registered with this
    `GradientsDebugger` instance and can later be retrieved, with the
methods `gradient_tensor` and `gradient_tensors`.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x)
grad_debugger = tf_debug.GradientsDebugger()
debug_y = grad_debugger.identify_gradient(y)
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
with grad_debugger:
train_op = tf.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
```
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
        are to be registered with this `GradientsDebugger` instance when they
        are created, e.g., during @{tf.gradients} calls or the construction
        of an optimization (training) op that uses @{tf.gradients}.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.
Raises:
      ValueError: If an op with a name that duplicates the gradient-debugging op
already exists in the graph (highly unlikely).
"""
# TODO(cais): Allow overriding gradient.
# TODO(cais): Implement value_stack.
grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)
# pylint: disable=protected-access
identity_op = (gen_array_ops._debug_gradient_ref_identity
if input_tensor.dtype._is_ref_dtype
else gen_array_ops._debug_gradient_identity)
debug_grad_identity = identity_op(input_tensor, name=grad_debug_op_name)
# pylint: enable=protected-access
assert debug_grad_identity.dtype == input_tensor.dtype
if debug_grad_identity.op.name != grad_debug_op_name:
raise ValueError(
"The graph already contains an op named %s" % grad_debug_op_name)
return debug_grad_identity
def watch_gradients_by_tensors(self, graph, tensors):
"""Watch gradient tensors by x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
    with respect to any paths that include the `x_tensor`s, the gradient
    tensor(s) with respect to the tensor will be registered with this
    `GradientsDebugger` instance and can later be retrieved, with the
methods `gradient_tensor` and `gradient_tensors`.
Unlike the method `identify_gradient`, this method is used to retrieve
gradient tensors after the construction of the forward subgraph has
completed (but before the construction of the backward subgraph).
This method is the same as `watch_gradients_by_x_tensor_names` except that
the tensors are specified by the Python `tf.Tensor` or `tf.Variable`
    objects, instead of by name patterns.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x, name="y")
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
    with grad_debugger.watch_gradients_by_tensors(y):
train_op = tf.train.GradientDescentOptimizer(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
# or
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensors: a `tf.Tensor` or `tf.Variable` object, or a list of such objects.
    Returns:
The GradientsDebugger instance itself.
"""
if not isinstance(tensors, list):
tensors = [tensors]
tensor_name_regex = []
for tensor in tensors:
tensor_name_regex.append(re.escape(tensor.name) + "$")
tensor_name_regex = "(" + "|".join(tensor_name_regex) + ")"
return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)
def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):
"""Watch gradient tensors by name(s) of the x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to the x-tensors, the gradient tensor(s) will be registered
with this `GradientsDebugger` instance and can later be retrieved.
Unlike the `identify_gradient` method,
|
pierotofy/WebODM
|
app/tests/test_api_task_import.py
|
Python
|
mpl-2.0
| 6,442
| 0.002173
|
import os
import time
import io
import requests
from django.contrib.auth.models import User
from guardian.shortcuts import remove_perm, assign_perm
from rest_framework import status
from rest_framework.test import APIClient
import worker
from app.cogeo import valid_cogeo
from app.models import Project
from app.models import Task
from app.tests.classes import BootTransactionTestCase
from app.tests.utils import clear_test_media_root, start_processing_node
from nodeodm import status_codes
from nodeodm.models import ProcessingNode
from webodm import settings
class TestApiTask(BootTransactionTestCase):
def setUp(self):
super().setUp()
clear_test_media_root()
def test_task(self):
client = APIClient()
with start_processing_node():
user = User.objects.get(username="testuser")
self.assertFalse(user.is_superuser)
project = Project.objects.create(
owner=user,
name="test project"
)
image1 = open("app/fixtures/tiny_drone_image.jpg", 'rb')
image2 = open("app/fixtures/tiny_drone_image_2.jpg", 'rb')
# Create processing node
pnode = ProcessingNode.objects.create(hostname="localhost", port=11223)
client.login(username="testuser", password="test1234")
# Create task
res = client.post("/api/projects/{}/tasks/".format(project.id), {
'images': [image1, image2]
}, format="multipart")
image1.close()
image2.close()
task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
task.refresh_from_db()
if task.status == status_codes.COMPLETED:
break
c += 1
time.sleep(1)
self.assertEqual(task.status, status_codes.COMPLETED)
# Download task assets
task_uuid = task.uuid
res = client.get("/api/projects/{}/tasks/{}/download/all.zip".format(project.id, task.id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
if not os.path.exists(settings.MEDIA_TMP):
os.mkdir(settings.MEDIA_TMP)
assets_path = os.path.join(settings.MEDIA_TMP, "all.zip")
with open(assets_path, 'wb') as f:
f.write(res.content)
remove_perm('change_project', user, project)
assets_file = open(assets_path, 'rb')
# Cannot import unless we have permission
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'file': [assets_file]
}, format="multipart")
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
assign_perm('change_project', user, project)
# Import with file upload method
assets_file.seek(0)
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'file': [assets_file]
}, format="multipart")
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
assets_file.close()
file_import_task = Task.objects.get(id=res.data['id'])
            # Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
file_import_task.refresh_from_db()
if file_import_task.status == status_codes.COMPLETED:
break
c += 1
time.sleep(1)
self.assertEqual(file_import_task.import_url, "file://all.zip")
self.assertEqual(file_import_task.images_count, 1)
            self.assertEqual(file_import_task.processing_node, None)
self.assertEqual(file_import_task.auto_processing_node, False)
# Can access assets
res = client.get("/api/projects/{}/tasks/{}/assets/odm_orthophoto/odm_orthophoto.tif".format(project.id, file_import_task.id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertTrue(valid_cogeo(file_import_task.assets_path(task.ASSETS_MAP["orthophoto.tif"])))
self.assertTrue(valid_cogeo(file_import_task.assets_path(task.ASSETS_MAP["dsm.tif"])))
self.assertTrue(valid_cogeo(file_import_task.assets_path(task.ASSETS_MAP["dtm.tif"])))
# Set task public so we can download from it without auth
file_import_task.public = True
file_import_task.save()
# Import with URL method
assets_import_url = "http://{}:{}/task/{}/download/all.zip".format(pnode.hostname, pnode.port, task_uuid)
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'url': assets_import_url
})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
url_task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
url_task.refresh_from_db()
if url_task.status == status_codes.COMPLETED:
break
c += 1
time.sleep(1)
self.assertEqual(url_task.import_url, assets_import_url)
self.assertEqual(url_task.images_count, 1)
# Import corrupted file
assets_import_url = "http://{}:{}/task/{}/download/orthophoto.tif".format(pnode.hostname, pnode.port, task_uuid)
res = client.post("/api/projects/{}/tasks/import".format(project.id), {
'url': assets_import_url
})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
corrupted_task = Task.objects.get(id=res.data['id'])
# Wait for completion
c = 0
while c < 10:
worker.tasks.process_pending_tasks()
corrupted_task.refresh_from_db()
if corrupted_task.status == status_codes.FAILED:
break
c += 1
time.sleep(1)
self.assertEqual(corrupted_task.status, status_codes.FAILED)
self.assertTrue("Invalid" in corrupted_task.last_error)
|
sjhawke/piface-cad
|
lib/writetheweather.py
|
Python
|
gpl-3.0
| 1,742
| 0.011481
|
#!/usr/bin/env python3
import os
import requests
import time
import json
def getWeatherAsWords():
# Settings for your API key and location details read from environment strings, e.g. exports etc
appid = str(os.environ.get('apikey'))
locid = str(os.environ.get('locationkey'))
lat = str(os.environ.get('lat'))
lon = str(os.environ.get('lon'))
uri = "https://api.openweathermap.org/data/2.5/weather?id=" + locid + "&units=metric&APPID=" + appid
# print(uri)
weather = "no weather data"
try:
r = requests.get(uri, timeout=5)
        #print (str(r.status_code))
if r.status_code == 200:
# get count - check for zero and return amber.
body = r.json()
w = body['weather']
main = body['main']
temp = int(round(main['temp'],0))
outlook = w[0]['description']
weather = str(temp) + "C " + str(main['humidity']) + "%Hu " + outlook
uv_uri = "https://api.openweathermap.org/data/2.5/uvi?lon=" + lon + "&lat=" + lat + "&APPID=" + appid
# print(uv_uri)
uvreq = requests.get(uv_uri, timeout=5)
if uvreq.status_code == 200:
body = uvreq.json()
uv = body['value']
uvindex = ''
if(uv>8.0):
uvindex = 'VH'
elif(uv>5.0):
uvindex = "H"
elif(uv>2.0):
uvindex = "M"
else:
uvindex = "L"
weather = str(temp) + "C " + str(main['humidity']) + "%Hu " + "UV:" + uvindex + "\n" + outlook
except:
pass
# we swallow all communication errors
return weather
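# --- Editor's note (illustrative; not part of the original script) ---
# With hypothetical readings of 12 degrees, 63% humidity, a UV index of 1.3 and a
# "scattered clouds" outlook, the function above returns "12C 63%Hu UV:L\nscattered clouds";
# if the UV request fails, only the shorter "12C 63%Hu scattered clouds" form is built.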
if __name__ == '__main__':
print(getWeatherAsWords())
|
eduNEXT/edx-platform
|
common/djangoapps/course_modes/migrations/0005_auto_20151217_0958.py
|
Python
|
agpl-3.0
| 1,002
| 0.000998
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0004_auto_20151113_1457'),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[],
state_operations=[
migrations.RemoveField(
model_name='coursemode',
name='expiration_datetime',
),
migrations.AddField(
model_name='coursemode',
name='_expiration_datetime',
                    field=models.DateTimeField(db_column='expiration_datetime', default=None, blank=True, help_text='OPTIONAL: After this date/time, users will no longer be able to enroll in this mode. Leave this blank if users can enroll in this mode until enrollment closes for the course.', null=True, verbose_name='Upgrade Deadline'),
),
]
)
]
|
eggsandbeer/scheduler
|
synergy/mx/tree_node_details.py
|
Python
|
bsd-3-clause
| 2,627
| 0.002284
|
__author__ = 'Bohdan Mushkevych'
from werkzeug.utils import cached_property
from synergy.conf import settings
from synergy.system import time_helper
from synergy.conf import context
from synergy.mx.rest_model import RestTimetableTreeNode, RestJob
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request
class TreeNodeDetails(BaseRequestHandler):
def __init__(self, request, **values):
super(TreeNodeDetails, self).__init__(request, **values)
self.process_name = request.args.get('process_name')
self.timeperiod = request.args.get('timeperiod')
self.tree = self.scheduler.timetable.get_tree(self.process_name)
        self.is_request_valid = True if self.tree else False
@classmethod
def get_details(cls, node, as_model=False):
"""method returns either RestJob instance or corresponding document, depending on the as_model argument """
rest_job = RestJob(
process_name=node.process_name,
timeperiod=node.timeperiod,
time_qualifier=node.time_qualifier,
            number_of_children=len(node.children),
number_of_failures='NA' if not node.job_record else node.job_record.number_of_failures,
state='NA' if not node.job_record else node.job_record.state,
log=node.job_record.log)
if as_model:
return rest_job
else:
return rest_job.document
@cached_property
@valid_action_request
def details(self):
rest_node = RestTimetableTreeNode()
if not self.timeperiod:
            # return list of yearly nodes OR leaves for a linear tree
            # limit number of children to return, since a linear tree can hold thousands of nodes
sorted_keys = sorted(self.tree.root.children.keys(), reverse=True)
sorted_keys = sorted_keys[:settings.settings['mx_children_limit']]
for key in sorted_keys:
child = self.tree.root.children[key]
rest_node.children[key] = TreeNodeDetails.get_details(child)
else:
time_qualifier = context.process_context[self.process_name].time_qualifier
self.timeperiod = time_helper.cast_to_time_qualifier(time_qualifier, self.timeperiod)
node = self.tree.get_node(self.process_name, self.timeperiod)
rest_node.node = TreeNodeDetails.get_details(node, as_model=True)
for key in node.children:
child = node.children[key]
rest_node.children[key] = TreeNodeDetails.get_details(child)
return rest_node.document
|
mtholder/phyloplumber
|
phyloplumber/controllers/projects.py
|
Python
|
gpl-3.0
| 8,738
| 0.005035
|
# The projects controller provides an interface for version-controlled projects
# stored on the fs as subdirectories of
# ${top_internal_dir}/projects where top_internal_dir is a variable in the
# .ini file used to launch the server.
#
# The structure of these directories is assumed to be:
# name.txt -- plain text file with the user-assigned name of the project
# index.xml
#
# The name of each project sub-directory is simply a unique number assigned
# internally by phyloplumber
#
#
import logging, os, sys, uuid
import phyloplumber.lib.helpers as h
import formencode
from formencode import htmlfill
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from phyloplumber.model import meta, PhyloplumberProject
from phyloplumber.lib.index_nexml import new_index
from phyloplumber.lib.base import BaseController, render, is_debug_mode, CorruptedProjectError, InvalidProjectIDError
log = logging.getLogger(__name__)
from phyloplumber.lib.base import get_internal_dir, get_external_dir, serves_projects
import dendropy
class ProjectWrapper(object):
def __init__(self, project_id, dir):
self.dir = dir
self.project_id = project_id
        self.name = read_name_for_project(dir)
index_file = os.path.join(self.dir, 'index.xml')
try:
inp = open(index_file, 'rU')
except:
if os.path.exists(self.dir):
raise CorruptedProjectError(project_id, 'index file missing')
raise InvalidProjectIDError(project_id)
try:
self.data_set = dendropy.DataSet(stream=inp, schema='nexml')
except Exception, x:
raise CorruptedProjectError(project_id, 'Error parsing the index file:\n' + str(x))
def get_entities(self):
return []
entity_list = property(get_entities)
class NewProjectForm(formencode.Schema):
allow_extra_fields = True
filter_extra_fields = True
name = formencode.validators.UnicodeString(not_empty=True)
def get_internal_projects_dir():
return get_internal_dir('projects')
def get_project_subdir_names():
e = get_internal_projects_dir()
log.debug("Checking %s for subdirectories\n" % e)
sub = os.listdir(e)
proj_sub = []
for i in sub:
project_dir = os.path.join(e, i)
if os.path.isdir(project_dir):
proj_sub.append(i)
return proj_sub
def next_project_id():
dir_list = get_project_subdir_names()
next_ind = 0
for d in dir_list:
try:
i = int(d)
if i >= next_ind:
next_ind = 1 + i
except:
pass
dirname = str(uuid.uuid4())
return dirname
def get_relative_dir_for_project(project_id):
return project_id
def read_project(project_id):
dir_fragment = get_relative_dir_for_project(project_id)
dir = os.path.join(get_internal_projects_dir(), dir_fragment)
return ProjectWrapper(project_id=project_id, dir=dir)
def read_name_for_project(project_dir):
try:
name_file = os.path.join(project_dir, 'name.txt')
return open(name_file, 'rU').read().strip()
except:
return "Corrupted Project"
def get_project_summary_tuple(i):
e = get_internal_dir('projects')
project_dir = os.path.join(e,i)
project_name = read_name_for_project(project_dir)
return (project_name, h.url(controller='projects', action='show', id=i), i)
class ProjectsController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('project', 'projects')
def index(self, format='html'):
"""GET /projects: All items in the collection"""
if not serves_projects():
response.status = '403 Forbidden (projects not enabled for this phyloplumber instance)'
return 'Projects not enabled for this phyloplumber instance'
sub = get_project_subdir_names()
c.subdirs = [get_project_summary_tuple(i) for i in sub]
return render('/projects.html')
def create(self):
"""POST /projects: Create a new item"""
        # Figure out the next number to use for the subdirectory -- not guaranteed to be unique across instances, so this is not too crucial
if not serves_projects():
response.status = '403 Forbidden (projects not enabled for this phyloplumber instance)'
return 'Projects not enabled for this phyloplumber instance'
schema = NewProjectForm()
try:
c.form_result = schema.to_python(dict(request.params))
except formencode.Invalid, error:
c.form_result = error.value
c.form_errors = error.error_dict or {}
html = render('/new_project.html')
return htmlfill.render(
html,
defaults=c.form_result,
errors=c.form_errors
)
dirname = next_project_id()
full_path_to_dir = get_internal_dir(os.path.join('projects', dirname))
p_proj = PhyloplumberProject()
p_proj.id = dirname
p_proj.parent_dirname = dirname
try:
meta.Session.save(p_proj)
finally:
meta.Session.commit()
log.debug('proj_id = %(id)s\nparent_dirname = %(dir)s\n' % {'id' : p_proj.id , 'dir' : dirname})
log.debug('request.params = %(p)s\n' % {'p' : str(request.params)})
name = request.params.get('name')
description = request.params.get('description')
full_path_to_index = os.path.join(full_path_to_dir, 'index.xml')
out = open(full_path_to_index, 'w')
out.write(new_index(name, project_description=description, project_id=p_proj.id))
out.close()
out = open(os.path.join(full_path_to_dir, 'name.txt'), 'w')
out.write(name + '\n')
out.close()
new_index(name, description)
fn = request.params.get('file')
if fn:
pass
out_format = request.params.get('format', 'html')
response.status_int = 201
if out_format == 'xml':
c.subdirs = [get_project_summary_tuple(i)]
return render('/projects.xml')
u = url(controller="projects", action='show', id=str(p_proj.id))
redirect(u)
def new(self, format='html'):
"""GET /projects/new: Form to create a new item"""
if not serves_projects():
response.status = '403 Forbidden (projects not enabled for this phyloplumber instance)'
return 'Projects not enabled for this phyloplumber instance'
c.debug = is_debug_mode()
return render('/new_project.html')
def update(self, id):
"""PUT /projects/id: Update an existing item"""
if not serves_projects():
response.status = '403 Forbidden (projects not enabled for this phyloplumber instance)'
return 'Projects not enabled for this phyloplumber instance'
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="PUT" />
# Or using helpers:
# h.form(url('project', id=ID),
# method='put')
# url('project', id=ID)
def delete(self, id):
"""DELETE /projects/id: Delete an existing item"""
if not serves_projects():
response.status = '403 Forbidden (projects not enabled for this phyloplumber instance)'
return 'Projects not enabled for this phyloplumber instance'
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="DELETE" />
# Or using helpers:
# h.form(url('project', id=ID),
# method='delete')
# url('project', id=ID)
return u'blah delete'
def show(self, id, format='html'):
"""GET /projects/id: Show a specific item"""
if not serves_projects():
response.status = '403 Forbidden (p
|
mikitex70/pelican-plugins
|
i18n_subsites/test_i18n_subsites.py
|
Python
|
agpl-3.0
| 5,011
| 0.000798
|
'''Unit tests for the i18n_subsites plugin'''
import os
import locale
import unittest
import subprocess
from tempfile import mkdtemp
from shutil import rmtree
from . import i18n_subsites as i18ns
from pelican import Pelican
from pelican.tests.support import get_settings
from pelican.settings import read_settings
class TestTemporaryLocale(unittest.TestCase):
'''Test the temporary locale context manager'''
def test_locale_restored(self):
'''Test that the locale is restored after exiting context'''
orig_locale = locale.setlocale(locale.LC_ALL)
with i18ns.temporary_locale():
locale.setlocale(locale.LC_ALL, 'C')
self.assertEqual(locale.setlocale(locale.LC_ALL), 'C')
self.assertEqual(locale.setlocale(locale.LC_ALL), orig_locale)
def test_temp_locale_set(self):
'''Test that the temporary locale is set'''
with i18ns.temporary_locale('C'):
self.assertEqual(locale.setlocale(locale.LC_ALL), 'C')
class TestSettingsManipulation(unittest.TestCase):
'''Test operations on settings dict'''
def setUp(self):
'''Prepare default settings'''
self.settings = get_settings()
def test_get_pelican_cls_class(self):
'''Test that we get class given as an object'''
self.settings['PELICAN_CLASS'] = object
cls = i18ns.get_pelican_cls(self.settings)
self.assertIs(cls, object)
def test_get_pelican_cls_str(self):
'''Test that we get correct class given by string'''
cls = i18ns.get_pelican_cls(self.settings)
self.assertIs(cls, Pelican)
class TestSitesRelpath(unittest.TestCase):
'''Test relative path between sites generation'''
def setUp(self):
'''Generate some sample siteurls'''
self.siteurl = 'http://example.com'
i18ns._SITE_DB['en'] = self.siteurl
i18ns._SITE_DB['de'] = self.siteurl + '/de'
def tearDown(self):
'''Remove sites from db'''
i18ns._SITE_DB.clear()
def test_get_site_path(self):
'''Test getting the path within a site'''
self.assertEqual(i18ns.get_site_path(self.siteurl), '/')
self.assertEqual(i18ns.get_site_path(self.siteurl + '/de'), '/de')
def test_relpath_to_site(self):
'''Test getting relative paths between sites'''
self.assertEqual(i18ns.relpath_to_site('en', 'de'), 'de')
self.assertEqual(i18ns.relpath_to_site('de', 'en'), '..')
class TestRegistration(unittest.TestCase):
'''Test plugin registration'''
def test_return_on_missing_signal(self):
'''Test return on missing required signal'''
i18ns._SIGNAL_HANDLERS_DB['tmp_sig'] = None
i18ns.register()
self.assertNotIn(id(i18ns.save_generator),
i18ns.signals.generator_init.receivers)
def test_registration(self):
'''Test registration of all signal handlers'''
i18ns.register()
        for sig_name, handler in i18ns._SIGNAL_HANDLERS_DB.items():
sig = getattr(i18ns.signals, sig_name)
self.assertIn(id(handler), sig.receivers)
# clean up
sig.disconnect(handler)
class TestFullRun(unittest.TestCase):
    '''Test running Pelican with the Plugin'''
def setUp(self):
'''Create temporary output and cache folders'''
self.temp_path = mkdtemp(prefix='pelicantests.')
self.temp_cache = mkdtemp(prefix='pelican_cache.')
def tearDown(self):
'''Remove output and cache folders'''
rmtree(self.temp_path)
rmtree(self.temp_cache)
def test_sites_generation(self):
'''Test generation of sites with the plugin
Compare with recorded output via ``git diff``.
To generate output for comparison run the command
``pelican -o test_data/output -s test_data/pelicanconf.py \
test_data/content``
Remember to remove the output/ folder before that.
'''
base_path = os.path.dirname(os.path.abspath(__file__))
base_path = os.path.join(base_path, 'test_data')
content_path = os.path.join(base_path, 'content')
output_path = os.path.join(base_path, 'output')
settings_path = os.path.join(base_path, 'pelicanconf.py')
settings = read_settings(path=settings_path, override={
'PATH': content_path,
'OUTPUT_PATH': self.temp_path,
'CACHE_PATH': self.temp_cache,
'PLUGINS': [i18ns],
}
)
pelican = Pelican(settings)
pelican.run()
# compare output
out, err = subprocess.Popen(
['git', 'diff', '--no-ext-diff', '--exit-code', '-w', output_path,
self.temp_path], env={'PAGER': ''},
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
self.assertFalse(out, 'non-empty `diff` stdout:\n{}'.format(out))
self.assertFalse(err, 'non-empty `diff` stderr:\n{}'.format(err))
|
open-craft/opencraft
|
instance/serializers/openedx_appserver.py
|
Python
|
agpl-3.0
| 3,375
| 0.000296
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <contact@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Open edX AppServer serializers (API representation)
"""
# Imports #####################################################################
from rest_framework import serializers
from instance.models.openedx_appserver import OpenEdXAppServer, OpenEdXAppConfiguration
from instance.serializers.appserver import AppServerBasicSerializer
from instance.serializers.instance import InstanceReferenceMinimalSerializer
from instance.serializers.logentry import LogEntrySerializer
from instance.serializers.server import OpenStackServerSerializer
# Serializers #################################################################
class OpenEdXAppServerSerializer(serializers.ModelSerializer):
"""
Detailed serializer for OpenEdXAppServer
"""
instance = InstanceReferenceMinimalSerializer(source='owner')
server = OpenStackServerSerializer()
class Meta:
model = OpenEdXAppServer
        fields = tuple(OpenEdXAppConfiguration.get_config_fields()) + (
'configuration_database_settings',
'configuration_storage_settings',
'configuration_theme_settings',
'configuration_site_configuration_settings',
'common_configuration_settings',
'configuration_settings',
'instance',
'server',
)
def to_representation(self, instance):
"""
Add additional fields/data to the output
"""
output = AppServerBasicSerializer(instance, context=self.context).data
output.update(super().to_representation(instance))
return output
class OpenEdXAppServerLogSerializer(serializers.ModelSerializer):
"""
Provide the log entries for an OpenEdXAppServer
"""
log_entries = LogEntrySerializer(many=True, read_only=True)
log_error_entries = LogEntrySerializer(many=True, read_only=True)
class Meta:
model = OpenEdXAppServer
fields = ('log_entries', 'log_error_entries')
# create/update intentionally omitted, pylint: disable=abstract-method
class SpawnAppServerSerializer(serializers.Serializer):
"""
Serializer for the 'instance_id' argument to the "POST .../spawn/" view
"""
instance_id = serializers.IntegerField(label="Open edX Instance ID")
class Meta:
fields = ('instance_id', )
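# Illustrative usage (request shape assumed, not defined in this module): a
# POST to the spawn endpoint with a body like {"instance_id": 42} is expected
# to validate through SpawnAppServerSerializer into {'instance_id': 42}.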
class OpenEdXReleaseSerializer(serializers.Serializer):
"""
Simple serializer for OpenEdX release names, that are
linked to AppServer instances.
"""
id = serializers.CharField(source='name')
name = serializers.CharField()
|
HeinerTholen/Varial
|
varial/test/test_tools.py
|
Python
|
gpl-3.0
| 3,530
| 0
|
#!/usr/bin/env python
import varial.tools
import unittest
import shutil
import os
class ResultCreator(varial.tools.Tool):
def run(self):
self.result = varial.wrp.Wrapper(name=self.name)
class ResultSearcher(varial.tools.Tool):
def __init__(self, name, input_path):
super(ResultSearcher, self).__init__(name)
self.input_path = input_path
def reuse(self):
with self.io.block_of_files:
self.result = self.io.get('result')
def run(self):
self.result = self.lookup_result(self.input_path)
class _Prntr(varial.tools.Tool):
def run(self):
varial.analysis.print_tool_tree()
class TestTools(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(TestTools, self).__init__(methodName)
self.base_name = 'VanillaChain'
def setUp(self):
super(TestTools, self).setUp()
varial.monitor.current_error_level = 2
def tearDown(self):
super(TestTools, self).tearDown()
varial.monitor.current_error_level = 0
if os.path.exists(self.base_name):
shutil.rmtree(self.base_name)
def _setup_chains(self, chain_class):
searchers = [
ResultSearcher(
'ResultSearcher0',
'../../../Creators/InnerCreators/ResultCreator'
),
ResultSearcher(
'ResultSearcher1',
'../.././../Creators/./InnerCreators/./ResultCreator'
),
ResultSearcher(
'ResultSearcher3',
'BaseChain/Creators/ResultCreator'
),
ResultSearcher(
'ResultSearcher4',
'/BaseChain/Creators/ResultCreator'
),
ResultSearcher(
'ResultSearcher5',
self.base_name + '/BaseChain/Creators/ResultCreator'
),
]
chain = varial.tools.ToolChain('BaseChain', [
chain_class('Creators', [
chain_class('InnerCreators', [
ResultCreator(),
]),
ResultCreator(),
]),
chain_class('Searchers', [
chain_class('InnerSearchers', searchers[:2]),
] + searchers[2:]),
])
return searchers, chain
def test_analysis_reset(self):
# this will reset analysis
varial.analysis.reset()
with varial.tools.ToolChainVanilla(self.base_name):
varial.analysis.fs_aliases.append('TESTVALUE')
self.assertListEqual(varial.analysis.fs_aliases, [])
def test_lookup_result(self):
searchers, chain = self._setup_chains(varial.tools.ToolChain)
chain = varial.tools.ToolChainVanilla(self.base_name, [chain])
varial.tools.Runner(chain)
for srchr in searchers:
self.assertIsNotNone(
srchr.result,
'Result not found for input_path: %s' % srchr.input_path
)
def test_lookup_result_parallel(self):
searchers, chain = self._setup_chains(varial.tools.ToolChainParallel)
chain = varial.tools.ToolChainVanilla(self.base_name, [chain])
varial.tools.Runner(chain)
for srchr in searchers:
self.assertIsNotNone(
srchr.result,
'Result not found for input_path: %s' % srchr.input_path
)
suite = unittest.TestLoader().loadTestsFromTestCase(TestTools)
if __name__ == '__main__':
unittest.main()
|
iivvoo/resturo
|
resturo/serializers.py
|
Python
|
isc
| 2,545
| 0
|
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from .models import EmailVerification, modelresolver
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name', 'email',
'verified', 'is_staff', 'is_superuser', 'is_active',
'date_joined')
read_only_fields = ('is_staff', 'is_superuser',
'is_active', 'date_joined',)
verified = serializers.SerializerMethodField()
def get_verified(self, obj):
try:
return obj.verification.verified
except EmailVerification.DoesNotExist:
return True
class UserCreateSerializer(serializers.ModelSerializer):
jwt_token = serializers.CharField(read_only=True)
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name',
'email', 'password', 'jwt_token')
        extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = self.Meta.model(
email=validated_data['email'],
            username=validated_data['username'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
# XXX should be jwt / token agnostic!
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
user.jwt_token = token
return user
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = modelresolver('Organization')
fields = ("id", "name")
class PasswordResetSerializer(serializers.Serializer):
class Meta:
model = modelresolver('Organization')
token = serializers.CharField()
password = serializers.CharField()
class InviteSerializer(serializers.Serializer):
handle = serializers.CharField()
strict = serializers.BooleanField()
role = serializers.IntegerField()
class JoinSerializer(serializers.Serializer):
JOIN_ACCEPT = 1
JOIN_REJECT = 2
token = serializers.CharField()
action = serializers.ChoiceField(choices=(JOIN_ACCEPT, JOIN_REJECT),
default=JOIN_ACCEPT)
|
EduPepperPD/pepper2013
|
common/lib/xmodule/xmodule/static_content.py
|
Python
|
agpl-3.0
| 6,505
| 0.001845
|
# /usr/bin/env python
"""
This module has utility functions for gathering up the static content
that is defined by XModules and XModuleDescriptors (javascript and css)
"""
import logging
import hashlib
import os
import errno
import sys
from collections import defaultdict
from docopt import docopt
from path import path
from xmodule.x_module import XModuleDescriptor
LOG = logging.getLogger(__name__)
def write_module_styles(output_root):
"""Write all registered XModule css, sass, and scss files to output root."""
return _write_styles('.xmodule_display', output_root, _list_modules())
def write_module_js(output_root):
"""Write all registered XModule js and coffee files to output root."""
return _write_js(output_root, _list_modules())
def write_descriptor_styles(output_root):
"""Write all registered XModuleDescriptor css, sass, and scss files to output root."""
return _write_styles('.xmodule_edit', output_root, _list_descriptors())
def write_descriptor_js(output_root):
"""Write all registered XModuleDescriptor js and coffee files to output root."""
return _write_js(output_root, _list_descriptors())
def _list_descriptors():
"""Return a list of all registered XModuleDescriptor classes."""
return [
desc for desc in [
desc for (_, desc) in XModuleDescriptor.load_classes()
]
]
def _list_modules():
"""Return a list of all registered XModule classes."""
return [
desc.module_class
for desc
in _list_descriptors()
]
def _ensure_dir(directory):
"""Ensure that `directory` exists."""
try:
os.makedirs(directory)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def _write_styles(selector, output_root, classes):
"""
Write the css fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
css_fragments = defaultdict(set)
for class_ in classes:
class_css = class_.get_css()
for filetype in ('sass', 'scss', 'css'):
for idx, fragment in enumerate(class_css.get(filetype, [])):
css_fragments[idx, filetype, fragment].add(class_.__name__)
css_imports = defaultdict(set)
for (idx, filetype, fragment), classes in sorted(css_fragments.items()):
fragment_name = "{idx:0=3d}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
# Prepend _ so that sass just includes the files into a single file
filename = '_' + fragment_name
contents[filename] = fragment
for class_ in classes:
css_imports[class_].add(fragment_name)
module_styles_lines = []
module_styles_lines.append("@import 'bourbon/bourbon';")
module_styles_lines.append("@import 'bourbon/addons/button';")
for class_, fragment_names in css_imports.items():
module_styles_lines.append("""{selector}.xmodule_{class_} {{""".format(
class_=class_, selector=selector
))
module_styles_lines.extend(' @import "{0}";'.format(name) for name in fragment_names)
module_styles_lines.append('}')
contents['_module-styles.scss'] = '\n'.join(module_styles_lines)
_write_files(output_root, contents)
def _write_js(output_root, classes):
"""
Write the javascript fragments from all XModules in `classes`
into `output_root` as individual files, hashed by the contents to remove
duplicates
"""
contents = {}
js_fragments = set()
    for class_ in classes:
module_js = class_.get_javascript()
for filetype in ('coffee', 'js'):
for idx, fragment in enumerate(module_js.get(filetype, [])):
js_fragments.add((idx, filetype, fragment))
for idx, filetype, fragment in sorted(js_fragments):
filename = "{idx:0=3d
|
}-{hash}.{type}".format(
idx=idx,
hash=hashlib.md5(fragment).hexdigest(),
type=filetype)
contents[filename] = fragment
_write_files(output_root, contents, {'.coffee': '.js'})
return [output_root / filename for filename in contents.keys()]
def _write_files(output_root, contents, generated_suffix_map=None):
"""
Write file contents to output root.
Any files not listed in contents that exists in output_root will be deleted,
unless it matches one of the patterns in `generated_suffix_map`.
output_root (path): The root directory to write the file contents in
contents (dict): A map from filenames to file contents to be written to the output_root
generated_suffix_map (dict): Optional. Maps file suffix to generated file suffix.
For any file in contents, if the suffix matches a key in `generated_suffix_map`,
then the same filename with the suffix replaced by the value from `generated_suffix_map`
will be ignored
"""
_ensure_dir(output_root)
to_delete = set(file.basename() for file in output_root.files()) - set(contents.keys())
if generated_suffix_map:
for output_file in contents.keys():
for suffix, generated_suffix in generated_suffix_map.items():
if output_file.endswith(suffix):
to_delete.discard(output_file.replace(suffix, generated_suffix))
for extra_file in to_delete:
(output_root / extra_file).remove_p()
for filename, file_content in contents.iteritems():
output_file = output_root / filename
not_file = not output_file.isfile()
# not_file is included to short-circuit this check, because
# read_md5 depends on the file already existing
write_file = not_file or output_file.read_md5() != hashlib.md5(file_content).digest() # pylint: disable=E1121
if write_file:
LOG.debug("Writing %s", output_file)
output_file.write_bytes(file_content)
else:
LOG.debug("%s unchanged, skipping", output_file)
def main():
"""
Generate
Usage: static_content.py <output_root>
"""
args = docopt(main.__doc__)
root = path(args['<output_root>'])
write_descriptor_js(root / 'descriptors/js')
write_descriptor_styles(root / 'descriptors/css')
write_module_js(root / 'modules/js')
write_module_styles(root / 'modules/css')
if __name__ == '__main__':
sys.exit(main())
|
DaniFdezAlvarez/wikidataExplorer
|
wikidata_exp/wdexp/wikidata/commands/common_incoming_props_DUMP.py
|
Python
|
gpl-2.0
| 5,792
| 0.002762
|
__author__ = 'Dani'
import ijson
import json
PG_SCORE = "pg_score"
INCOMING_EDGES = "in_edges"
OUTCOMING_EDGES = "out_edges"
LABEL = "label"
DESCRIPTION = "desc"
class CommonIncomingCommand(object):
def __init__(self, source_dump_file, out_file, source_target_ids_file, topk_target_entities):
self._in_dump_file = source_dump_file
self._in_targets_file = source_target_ids_file
self._out_file = out_file
self._topk = topk_target_entities
self._edges_dict = {}
def _exec_command(self, string_return=False):
self._read_target_entities()
self._parse_dump_file()
if string_return:
return str(self._edges_dict)
else:
with open(self._out_file, "w") as out_stream:
json.dump(self._edges_dict, out_stream)
def _parse_dump_file(self):
json_stream = open(self._in_dump_file, "r")
elem_id = None
elem_type = None
desc_en = None
label_en = None
datatype = None
datavalue_type = None
current_claim_key = None
datavalue_num_id = None
possible_edges = []
elem_count = 1
for prefix, event, value in ijson.parse(json_stream):
if event == 'end_map':
if prefix == 'item':
for tuple_4 in possible_edges:
if self._is_valid_edge(elem_type, tuple_4[0],
tuple_4[1]): # triple: datatype, datavalue_type, datavalue_num_id
self._add_triple_if_proceed(elem_id, tuple_4[2], 'Q' + tuple_4[3], label_en, desc_en)
# print elem_id, tuple_4[2], 'Q' + tuple_4[3]
elem_id = None
elem_type = None
current_claim_key = None
# label_en = None
datavalue_num_id = None
datavalue_type = None
elem_count += 1
possible_edges = []
if elem_count % 10000 == 0:
                        print 'Processed so far: ' + str(elem_count)
elif prefix == "item.claims." + str(current_claim_key) + ".item":
possible_edges.append((datatype, datavalue_type, current_claim_key, str(datavalue_num_id)))
elif event == 'string':
if prefix == 'item.id':
elem_id = value
elif prefix == 'item.type':
elem_type = value
elif prefix == 'item.claims.' + str(current_claim_key) + '.item.mainsnak.datatype':
datatype = value
elif prefix == 'item.claims.' + str(current_claim_key) + '.item.mainsnak.datavalue.value.entity-type':
datavalue_type = value
elif prefix == 'item.labels.en.value':
label_en = value
elif prefix == 'item.descriptions.en.value':
desc_en = value
elif event == 'map_key' and prefix == 'item.claims':
current_claim_key = value
elif event == 'number' and prefix == 'item.claims.' + str(
current_claim_key) + '.item.mainsnak.datavalue.value.numeric-id':
datavalue_num_id = value
def _read_target_entities(self):
with open(self._in_targets_file, "r") as in_stream:
success = 0
for line in in_stream: # JSON INDENTED, JUST A DICT
pieces = self._parse_key_and_rank(line)
if len(pieces) == 2:
self._edges_dict[pieces[0]] = {PG_SCORE: pieces[1],
INCOMING_EDGES: {},
OUTCOMING_EDGES: {}}
success += 1
if success >= self._topk:
break
@staticmethod
def _parse_key_and_rank(raw_string):
first_index = None
last_index = None
colon_index = None
i = 0
for char in raw_string:
if char == '"':
if not first_index:
first_index = i
else:
last_index = i
if char == ":":
colon_index = i
break
i += 1
if None not in [first_index, last_index, colon_index]:
return str(raw_string[first_index + 1:last_index]), str(raw_string[colon_index + 2:-2])
else:
return ()
@staticmethod
def _is_valid_edge(subj_type, data_nature, data_type):
if subj_type == 'item' and data_nature == 'wikibase-item' and data_type == 'item':
return True
return False
def _add_triple_if_proceed(self, origin, property, target, label_origin, desc_origin):
if origin in self._edges_dict:
if LABEL not in self._edges_dict[origin]:
self._edges_dict[origin][LABEL] = label_origin
if DESCRIPTION not in self._edges_dict[origin]:
self._edges_dict[origin][DESCRIPTION] = desc_origin
if property not in self._edges_dict[origin][OUTCOMING_EDGES]:
self._edges_dict[origin][OUTCOMING_EDGES][property] = 0
self._edges_dict[origin][OUTCOMING_EDGES][property] += 1
if target in self._edges_dict:
if property not in self._edges_dict[target][INCOMING_EDGES]:
self._edges_dict[target][INCOMING_EDGES][property] = 0
            self._edges_dict[target][INCOMING_EDGES][property] += 1
|
fprados/nipype
|
nipype/interfaces/freesurfer/model.py
|
Python
|
bsd-3-clause
| 41,958
| 0.004886
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
__docformat__ = 'restructuredtext'
import os
from nipype.utils.filemanip import fname_presuffix, split_filename
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
OutputMultiPath, Directory, isdefined)
class MRISPreprocInputSpec(FSTraitedSpec):
out_file = File(argstr='--out %s', genfile=True,
desc='output filename')
target = traits.Str(argstr='--target %s', mandatory=True,
desc='target subject name')
hemi = traits.Enum('lh', 'rh', argstr='--hemi %s',
mandatory=True,
desc='hemisphere for source and target')
surf_measure = traits.Str(argstr='--meas %s',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='Use subject/surf/hemi.surf_measure as input')
surf_area = traits.Str(argstr='--area %s',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='Extract vertex area from subject/surf/hemi.surfname to use as input.')
subjects = traits.List(argstr='--s %s...',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='subjects from who measures are calculated')
fsgd_file = File(exists=True, argstr='--fsgd %s',
xor=('subjects', 'fsgd_file', 'subject_file'),
desc='specify subjects using fsgd file')
subject_file = File(exists=True, argstr='--f %s',
xor=('subjects', 'fsgd_file', 'subject_file'),
                        desc='file specifying subjects separated by white space')
surf_measure_file = InputMultiPath(File(exists=True), argstr='--is %s...',
xor=('surf_measure', 'surf_measure_file', 'surf_area'),
desc='file alternative to surfmeas, still requires list of subjects')
source_format = traits.Str(argstr='--srcfmt %s', desc='source format')
surf_dir = traits.Str(argstr='--surfdir %s',
                          desc='alternative directory (instead of surf)')
vol_measure_file = InputMultiPath(traits.Tuple(File(exists=True),
File(exists=True)),
argstr='--iv %s %s...',
desc='list of volume measure and reg file tuples')
proj_frac = traits.Float(argstr='--projfrac %s',
desc='projection fraction for vol2surf')
fwhm = traits.Float(argstr='--fwhm %f',
xor=['num_iters'],
desc='smooth by fwhm mm on the target surface')
num_iters = traits.Int(argstr='--niters %d',
xor=['fwhm'],
desc='niters : smooth by niters on the target surface')
fwhm_source = traits.Float(argstr='--fwhm-src %f',
xor=['num_iters_source'],
desc='smooth by fwhm mm on the source surface')
num_iters_source = traits.Int(argstr='--niterssrc %d',
xor=['fwhm_source'],
desc='niters : smooth by niters on the source surface')
smooth_cortex_only = traits.Bool(argstr='--smooth-cortex-only',
desc='only smooth cortex (ie, exclude medial wall)')
class MRISPreprocOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='preprocessed output file')
class MRISPreproc(FSCommand):
"""Use FreeSurfer mris_preproc to prepare a group of contrasts for
a second level analysis
Examples
--------
>>> preproc = MRISPreproc()
>>> preproc.inputs.target = 'fsaverage'
>>> preproc.inputs.hemi = 'lh'
>>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \
('cont1a.nii', 'register.dat')]
>>> preproc.inputs.out_file = 'concatenated_file.mgz'
>>> preproc.cmdline
'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat'
"""
_cmd = 'mris_preproc'
input_spec = MRISPreprocInputSpec
output_spec = MRISPreprocOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.out_file
outputs['out_file'] = outfile
if not isdefined(outfile):
outputs['out_file'] = os.path.join(os.getcwd(),
'concat_%s_%s.mgz' % (self.inputs.hemi,
self.inputs.target))
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._list_outputs()[name]
return None
class GLMFitInputSpec(FSTraitedSpec):
glm_dir = traits.Str(argstr='--glmdir %s', desc='save outputs to dir',
genfile=True)
in_file = File(desc='input 4D file', argstr='--y %s', mandatory=True,
copyfile=False)
_design_xor = ('fsgd', 'design', 'one_sample')
fsgd = traits.Tuple(File(exists=True), traits.Enum('doss', 'dods'),
argstr='--fsgd %s %s', xor=_design_xor,
desc='freesurfer descriptor file')
design = File(exists=True, argstr='--X %s', xor=_design_xor,
desc='design matrix file')
contrast = InputMultiPath(File(exists=True), argstr='--C %s...',
desc='contrast file')
one_sample = traits.Bool(argstr='--osgm',
xor=('one_sample', 'fsgd', 'design', 'contrast'),
desc='construct X and C as a one-sample group mean')
no_contrast_sok = traits.Bool(argstr='--no-contrasts-ok',
desc='do not fail if no contrasts specified')
per_voxel_reg = InputMultiPath(File(exists=True), argstr='--pvr %s...',
desc='per-voxel regressors')
self_reg = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--selfreg %d %d %d',
desc='self-regressor from index col row slice')
weighted_ls = File(exists=True, argstr='--wls %s',
xor=('weight_file', 'weight_inv', 'weight_sqrt'),
desc='weighted least squares')
fixed_fx_var = File(exists=True, argstr='--yffxvar %s',
desc='for fixed effects analysis')
fixed_fx_dof = traits.Int(argstr='--ffxdof %d',
xor=['fixed_fx_dof_file'],
desc='dof for fixed effects analysis')
fixed_fx_dof_file = File(argstr='--ffxdofdat %d',
xor=['fixed_fx_dof'],
desc='text file with dof for fixed effects analysis')
weight_file = File(exists=True, xor=['weighted_ls'],
desc='weight for each input at each voxel')
weight_inv = traits.Bool(argstr='--w-inv', desc='invert weights',
xor=['weighted_ls'])
weight_sqrt = traits.Bool(argstr='--w-sqrt', desc='sqrt of weights',
xor=['weighted_ls'])
fwhm = traits.Range(low=0.0, argstr='--fwhm %f',
desc='smooth input by fwhm')
var_fwhm = traits.Range(low=0.0, argstr='--var-fwhm %f',
desc='smooth variance by fwhm')
no_mask_smooth = traits.Bool(argstr='--no-mask-smooth',
desc='do not mask when smoothing')
no_est_fwhm = traits.Bool(argstr='--no-est-fwhm',
desc='turn off FWHM o
|
ahmedaljazzar/edx-platform
|
openedx/core/djangoapps/waffle_utils/tests/test_init.py
|
Python
|
agpl-3.0
| 5,739
| 0.003311
|
"""
Tests for waffle utils features.
"""
import crum
import ddt
from django.test import TestCase
from django.test.client import RequestFactory
from edx_django_utils.cache import RequestCache
from mock import patch
from opaque_keys.edx.keys import CourseKey
from waffle.testutils import override_flag
from .. import CourseWaffleFlag, WaffleFlagNamespace, WaffleSwitchNamespace, WaffleSwitch
from ..models import WaffleFlagCourseOverrideModel
@ddt.ddt
class TestCourseWaffleFlag(TestCase):
"""
Tests the CourseWaffleFlag.
"""
NAMESPACE_NAME = "test_namespace"
FLAG_NAME = "test_flag"
NAMESPACED_FLAG_NAME = NAMESPACE_NAME + "." + FLAG_NAME
TEST_COURSE_KEY = CourseKey.from_string("edX/DemoX/Demo_Course")
TEST_COURSE_2_KEY = CourseKey.from_string("edX/DemoX/Demo_Course_2")
TEST_NAMESPACE = WaffleFlagNamespace(NAMESPACE_NAME)
TEST_COURSE_FLAG = CourseWaffleFlag(TEST_NAMESPACE, FLAG_NAME)
def setUp(self):
super(TestCourseWaffleFlag, self).setUp()
        request = RequestFactory().request()
self.addCleanup(crum.set_current_request, None)
crum.set_current_request(request)
RequestCache.clear_all_namespaces()
@ddt.data(
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on, 'waffle_enabled': False, 'result': True},
        {'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.off, 'waffle_enabled': True, 'result': False},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.unset, 'waffle_enabled': True, 'result': True},
{'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.unset, 'waffle_enabled': False, 'result': False},
)
def test_course_waffle_flag(self, data):
"""
Tests various combinations of a flag being set in waffle and overridden
for a course.
"""
with patch.object(WaffleFlagCourseOverrideModel, 'override_value', return_value=data['course_override']):
with override_flag(self.NAMESPACED_FLAG_NAME, active=data['waffle_enabled']):
# check twice to test that the result is properly cached
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_KEY), data['result'])
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_KEY), data['result'])
# result is cached, so override check should happen once
WaffleFlagCourseOverrideModel.override_value.assert_called_once_with(
self.NAMESPACED_FLAG_NAME,
self.TEST_COURSE_KEY
)
# check flag for a second course
if data['course_override'] == WaffleFlagCourseOverrideModel.ALL_CHOICES.unset:
# When course override wasn't set for the first course, the second course will get the same
# cached value from waffle.
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_2_KEY), data['waffle_enabled'])
else:
# When course override was set for the first course, it should not apply to the second
# course which should get the default value of False.
self.assertEqual(self.TEST_COURSE_FLAG.is_enabled(self.TEST_COURSE_2_KEY), False)
@ddt.data(
{'flag_undefined_default': None, 'result': False},
{'flag_undefined_default': False, 'result': False},
{'flag_undefined_default': True, 'result': True},
)
def test_undefined_waffle_flag(self, data):
"""
Test flag with various defaults provided for undefined waffle flags.
"""
test_course_flag = CourseWaffleFlag(
self.TEST_NAMESPACE,
self.FLAG_NAME,
flag_undefined_default=data['flag_undefined_default']
)
with patch.object(
WaffleFlagCourseOverrideModel,
'override_value',
return_value=WaffleFlagCourseOverrideModel.ALL_CHOICES.unset
):
# check twice to test that the result is properly cached
self.assertEqual(test_course_flag.is_enabled(self.TEST_COURSE_KEY), data['result'])
self.assertEqual(test_course_flag.is_enabled(self.TEST_COURSE_KEY), data['result'])
# result is cached, so override check should happen once
WaffleFlagCourseOverrideModel.override_value.assert_called_once_with(
self.NAMESPACED_FLAG_NAME,
self.TEST_COURSE_KEY
)
@ddt.data(
{'flag_undefined_default': None, 'result': False},
{'flag_undefined_default': False, 'result': False},
{'flag_undefined_default': True, 'result': True},
)
def test_without_request(self, data):
"""
Test the flag behavior when outside a request context.
"""
crum.set_current_request(None)
test_course_flag = CourseWaffleFlag(
self.TEST_NAMESPACE,
self.FLAG_NAME,
flag_undefined_default=data['flag_undefined_default']
)
self.assertEqual(test_course_flag.is_enabled(self.TEST_COURSE_KEY), data['result'])
class TestWaffleSwitch(TestCase):
"""
Tests the WaffleSwitch.
"""
NAMESPACE_NAME = "test_namespace"
WAFFLE_SWITCH_NAME = "test_switch_name"
TEST_NAMESPACE = WaffleSwitchNamespace(NAMESPACE_NAME)
WAFFLE_SWITCH = WaffleSwitch(TEST_NAMESPACE, WAFFLE_SWITCH_NAME)
def test_namespaced_switch_name(self):
"""
Verify namespaced_switch_name returns the correct namespace switch name
"""
expected = self.NAMESPACE_NAME + "." + self.WAFFLE_SWITCH_NAME
actual = self.WAFFLE_SWITCH.namespaced_switch_name
self.assertEqual(actual, expected)
|
tschalch/pyTray
|
src/lib/reportlab/graphics/samples/scatter_lines_markers.py
|
Python
|
bsd-3-clause
| 3,824
| 0.02432
|
#Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.charts.lineplots import ScatterPlot
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin, String
from reportlab.graphics.charts.textlabels import Label
from excelcolors import *
class ScatterLinesMarkers(_DrawingEditorMixin,Drawing):
def __init__(self,width=200,height=150,*args,**kw):
apply(Drawing.__init__,(self,width,height)+args,kw)
self._add(self,ScatterPlot(),name='chart',validate=None,desc="The main chart")
self.chart.width = 115
self.chart.height = 80
self.chart.x = 30
self.chart.y = 40
self.chart.lines[0].strokeColor = color01
self.chart.lines[1].strokeColor = color02
self.chart.lines[2].strokeColor = color03
self.chart.lines[3].strokeColor = color04
self.chart.lines[4].strokeColor = color05
self.chart.lines[5].strokeColor = color06
self.chart.lines[6].strokeColor = color07
self.chart.lines[7].strokeColor = color08
self.chart.lines[8].strokeColor = color09
self.chart.lines[9].strokeColor = color10
self.chart.fillColor = backgroundGrey
self.chart.lineLabels.fontName = 'Helvetica'
self.chart.xValueAxis.labels.fontName = 'Helvetica'
self.chart.xValueAxis.labels.fontSize = 7
self.chart.xValueAxis.forceZero = 0
self.chart.data = [((100,100), (200,200), (250,210), (300,300), (400,500)), ((100,200), (200,300), (250,200), (300,400), (400, 600))]
self.chart.xValueAxis.avoidBoundFrac = 1
self.chart.xValueAxis.gridEnd = 115
self.chart.xValueAxis.tickDown = 3
self.chart.xValueAxis.visibleGrid = 1
self.chart.yValueAxis.tickLeft = 3
self.chart.yValueAxis.labels.fontName = 'Helvetica'
self.chart.yValueAxis.labels.fontSize = 7
self._add(self,Label(),name='Title',validate=None,desc="The title at the top of the chart")
self.Title.fontName = 'Helvetica-Bold'
self.Title.fontSize = 7
self.Title.x = 100
self.Title.y = 135
self.Title._text = 'Chart Title'
self.Title.maxWidth = 180
self.Title.height = 20
self.Title.textAnchor ='middle'
self._add(self,Legend(),name='Legend',validate=None,desc="The legend or key for the chart")
self.Legend.colorNamePairs = [(color01, 'Widgets'), (color02, 'Sprockets')]
self.Legend.fontName = 'Helvetica'
self.Legend.fontSize = 7
self.Legend.x = 153
self.Legend.y = 85
        self.Legend.dxTextSpace = 5
self.Legend.dy = 5
self.Legend.dx = 5
self.Legend.deltay = 5
self.Legend.alignment ='right'
self.chart.lineLabelFormat = None
self.chart.xLabel = 'X Axis'
self.chart.y = 30
self.chart.yLabel = 'Y Axis'
self.chart.yValueAxis.gridEnd = 115
self.chart.yValueAxis.visibleGrid = 1
        self.chart.yValueAxis.labelTextFormat = '%d'
self.chart.yValueAxis.forceZero = 1
self.chart.xValueAxis.forceZero = 1
self.chart.joinedLines = 1
self._add(self,0,name='preview',validate=None,desc=None)
if __name__=="__main__": #NORUNTESTS
ScatterLinesMarkers().save(formats=['pdf'],outDir=None,fnRoot='scatter_lines_markers')
|
ging/horizon
|
openstack_dashboard/dashboards/project/instances/tabs.py
|
Python
|
apache-2.0
| 3,495
| 0
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.dashboards.project.instances \
import audit_tables as a_tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances import console
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = ("project/instances/"
"_detail_overview.html")
def get_context_data(self, request):
return {"instance": self.tab_group.kwargs['instance']}
class LogTab(tabs.Tab):
name = _("Log")
slug = "log"
template_name = "project/instances/_detail_log.html"
preload = False
def get_context_data(self, request):
instance = self.tab_group.kwargs['instance']
try:
data = api.nova.server_console_output(request,
instance.id,
tail_length=35)
except Exception:
data = _('Unable to get log for instance "%s".') % instance.id
exceptions.handle(request, ignore=True)
        return {"instance": instance,
                "console_log": data}
class ConsoleTab(tabs.Tab):
name = _("Console")
slug = "console"
template_name = "project/instances/_detail_console.html"
preload = False
def get_context_data(self, request):
instance = self.tab_group.kwargs['instance']
console_type = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
console_url = None
try:
console_url = console.get_console(request, console_type, instance)
except exceptions.NotAvailable:
exceptions.handle(request, ignore=True, force_log=True)
return {'console_url': console_url, 'instance_id': instance.id}
def allowed(self, request):
# The ConsoleTab is available if settings.CONSOLE_TYPE is not set at
# all, or if it's set to any value other than None or False.
return bool(getattr(settings, 'CONSOLE_TYPE', True))
class AuditTab(tabs.TableTab):
name = _("Action Log")
slug = "audit"
table_classes = (a_tables.AuditTable,)
template_name = "project/instances/_detail_audit.html"
preload = False
def get_audit_data(self):
actions = []
try:
actions = api.nova.instance_action_list(
self.request, self.tab_group.kwargs['instance_id'])
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve instance action list.'))
return sorted(actions, reverse=True, key=lambda y: y.start_time)
class InstanceDetailTabs(tabs.TabGroup):
slug = "instance_details"
tabs = (OverviewTab, LogTab, ConsoleTab, AuditTab)
sticky = True
|
danallan/octal-application
|
server/apps/maps/forms.py
|
Python
|
gpl-3.0
| 3,363
| 0.007136
|
from django import forms
from django.contrib.auth.hashers import check_password
from models import Graphs, Concepts
from utils import graphCheck, GraphIntegrityError
import json
class GraphForm(forms.ModelForm):
json_input = forms.BooleanField(label=("JSON Input"), required=False,
help_text=("Check this box to define the graph with raw JSON instead of the graph editor."))
json_data = forms.CharField(label=("Graph JSON"), required=False,
help_text=("Copy-paste or type the JSON representation of your graph here."),
widget=forms.Textarea(attrs={'cols':80, 'rows':10}))
def clean_json_data(self):
"""
Validate JSON as being kmap structure
"""
json_data = self.cleaned_data['json_data'].strip()
if not json_data:
            raise forms.ValidationError("Error: graph cannot be blank")
try:
graph_list = json.loads(json_data)
return graphCheck(graph_list)
except ValueError:
raise forms.ValidationError("Error: malformed JSON")
except GraphIntegrityError as e:
raise forms.ValidationError("Error: %(val)s", params={'val':e})
class Meta:
model = Graphs
        fields = ['name', 'description', 'public', 'study_active', 'json_input', 'json_data', 'lti_key', 'lti_secret', 'secret']
labels = {
'name': ("Unit Name"),
'study_active': ("Research study"),
'lti_key': ("Consumer Key"),
'lti_secret': ("Shared Secret"),
}
help_texts = {
'public': ("Public units are displayed on the unit list. Private units will be accessible by anyone with the URL."),
'secret': ("The secret is used to modify the unit in the future. Please remember the value of this field!"),
'study_active': ("Check this only if you plan to use this unit as part of a research investigation."),
}
widgets = {
'name': forms.TextInput(attrs={'size':40}),
'description': forms.Textarea(attrs={'cols':40, 'rows':2}),
'secret': forms.HiddenInput(),
'lti_key': forms.HiddenInput(),
'lti_secret': forms.HiddenInput(),
}
class KeyForm(forms.Form):
"""
This form passes along data to ensure the user has authority to edit a map
"""
secret = forms.CharField(max_length=16, label=("Secret Key"),
widget=forms.TextInput(attrs={
'autocomplete':'off',
'autocorrect':'off',
'autocapitalize':'off',
'autofocus':'autofocus',
}))
edited = forms.BooleanField(required=False, initial=False,
widget=forms.HiddenInput())
def clean(self):
"""
When validating the form, compare the key against the graph's secret
"""
cleaned_data = super(KeyForm, self).clean()
if not check_password(cleaned_data.get("secret"), self._graph.secret):
raise forms.ValidationError("Incorrect secret")
return cleaned_data
def __init__(self, *args, **kwargs):
self._graph = kwargs.pop('graph')
super(KeyForm, self).__init__(*args, **kwargs)
|
gkotton/vmware-nsx
|
vmware-nsx/neutron/tests/unit/vmware/apiclient/test_api_eventlet_request.py
|
Python
|
apache-2.0
| 12,642
| 0
|
# Copyright (C) 2009-2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import random
import eventlet
from eventlet.green import urllib2
import mock
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import eventlet_client as client
from neutron.plugins.vmware.api_client import eventlet_request as request
from neutron.tests import base
from neutron.tests.unit import vmware
LOG = logging.getLogger(__name__)
REQUEST_TIMEOUT = 1
def fetch(url):
return urllib2.urlopen(url).read()
class ApiRequestEventletTest(base.BaseTestCase):
def setUp(self):
super(ApiRequestEventletTest, self).setUp()
self.client = client.EventletApiClient(
[("127.0.0.1", 4401, True)], "admin", "admin")
self.url = "/ws.v1/_debug"
self.req = request.EventletApiRequest(self.client, self.url)
def tearDown(self):
self.client = None
self.req = None
super(ApiRequestEventletTest, self).tearDown()
def test_construct_eventlet_api_request(self):
e = request.EventletApiRequest(self.client, self.url)
self.assertIsNotNone(e)
def test_apirequest_spawn(self):
def x(id):
eventlet.greenthread.sleep(random.random())
LOG.info(_LI('spawned: %d'), id)
for i in range(10):
request.EventletApiRequest._spawn(x, i)
def test_apirequest_start(self):
for i in range(10):
a = request.EventletApiRequest(
self.client, self.url)
a._handle_request = mock.Mock()
a.start()
eventlet.greenthread.sleep(0.1)
LOG.info(_LI('_handle_request called: %s'),
a._handle_request.called)
request.EventletApiRequest.joinall()
def test_join_with_handle_request(self):
self.req._handle_request = mock.Mock()
self.req.start()
self.req.join()
self.assertTrue(self.req._handle_request.called)
def test_join_without_handle_request(self):
self.req._handle_request = mock.Mock()
self.req.join()
self.assertFalse(self.req._handle_request.called)
def test_copy(self):
req = self.req.copy()
for att in [
'_api_client', '_url', '_method', '_body', '_headers',
'_http_timeout', '_request_timeout', '_retries',
'_redirects', '_auto_login']:
self.assertTrue(getattr(req, att) is getattr(self.req, att))
def test_request_error(self):
self.assertIsNone(self.req.request_error)
def test_run_and_handle_request(self):
self.req._request_timeout = None
self.req._handle_request = mock.Mock()
self.req.start()
self.req.join()
self.assertTrue(self.req._handle_request.called)
def test_run_and_timeout(self):
def my_handle_request():
LOG.info('my_handle_request() self: %s' % self.req)
LOG.info('my_handle_request() dir(self): %s' % dir(self.req))
eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2)
with mock.patch.object(
self.req,
'_handle_request',
new=my_handle_request
):
self.req._request_timeout = REQUEST_TIMEOUT
self.req.start()
self.assertIsNone(self.req.join())
def prep_issue_request(self):
mysock = mock.Mock()
mysock.gettimeout.return_value = 4242
myresponse = mock.Mock()
myresponse.read.return_value = 'body'
myresponse.getheaders.return_value = 'headers'
myresponse.status = httplib.MOVED_PERMANENTLY
myconn = mock.Mock()
myconn.request.return_value = None
myconn.sock = mysock
myconn.getresponse.return_value = myresponse
myconn.__str__ = mock.Mock()
myconn.__str__.return_value = 'myconn string'
req = self.req
req._redirect_params = mock.Mock()
req._redirect_params.return_value = (myconn, 'url')
req._request_str = mock.Mock()
req._request_str.return_value = 'http://cool/cool'
client = self.client
client.need_login = False
client._auto_login = False
client._auth_cookie = False
client.acquire_connection = mock.Mock()
client.acquire_connection.return_value = myconn
client.release_connection = mock.Mock()
|
return (mysock, myresponse, myconn)
def test_issue_request_trigger_exception(self):
(mysock, myresponse, myconn) = self.prep_issue_request()
self.client.acquire_connection.return_value = None
        self.req._issue_request()
self.assertIsInstance(self.req._request_error, Exception)
self.assertTrue(self.client.acquire_connection.called)
def test_issue_request_handle_none_sock(self):
(mysock, myresponse, myconn) = self.prep_issue_request()
myconn.sock = None
self.req.start()
self.assertIsNone(self.req.join())
self.assertTrue(self.client.acquire_connection.called)
def test_issue_request_exceed_maximum_retries(self):
(mysock, myresponse, myconn) = self.prep_issue_request()
self.req.start()
self.assertIsNone(self.req.join())
self.assertTrue(self.client.acquire_connection.called)
def test_issue_request_trigger_non_redirect(self):
(mysock, myresponse, myconn) = self.prep_issue_request()
myresponse.status = httplib.OK
self.req.start()
self.assertIsNone(self.req.join())
self.assertTrue(self.client.acquire_connection.called)
def test_issue_request_trigger_internal_server_error(self):
(mysock, myresponse, myconn) = self.prep_issue_request()
self.req._redirect_params.return_value = (myconn, None)
self.req.start()
self.assertIsNone(self.req.join())
self.assertTrue(self.client.acquire_connection.called)
def test_redirect_params_break_on_location(self):
myconn = mock.Mock()
(conn, retval) = self.req._redirect_params(
myconn, [('location', None)])
self.assertIsNone(retval)
def test_redirect_params_parse_a_url(self):
myconn = mock.Mock()
(conn, retval) = self.req._redirect_params(
myconn, [('location', '/path/a/b/c')])
self.assertIsNotNone(retval)
def test_redirect_params_invalid_redirect_location(self):
myconn = mock.Mock()
(conn, retval) = self.req._redirect_params(
myconn, [('location', '+path/a/b/c')])
self.assertIsNone(retval)
def test_redirect_params_invalid_scheme(self):
myconn = mock.Mock()
(conn, retval) = self.req._redirect_params(
myconn, [('location', 'invalidscheme://hostname:1/path')])
self.assertIsNone(retval)
def test_redirect_params_setup_https_with_cooki(self):
with mock.patch(vmware.CLIENT_NAME) as mock_client:
api_client = mock_client.return_value
self.req._api_client = api_client
myconn = mock.Mock()
(conn, retval) = self.req._redirect_params(
myconn, [('location', 'https://host:1/path')])
self.assertIsNotNone(retval)
self.assertTrue(api_client.acquire_redirect_connection.called)
def test_redirect_params_setup_htttps_and_query(self):
with mock.patch(vmware.CLIENT_NAME) as mock_client:
api_client = mock_client.return_value
self.req._api_client = api_client
myconn = mock.Mock(
|
oxc/Flexget
|
flexget/plugins/list/sonarr_list.py
|
Python
|
mit
| 11,933
| 0.003017
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
import json
import logging
from collections import MutableSet
import requests
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('sonarr_list')
class SonarrSet(MutableSet):
supported_ids = ['tvdb_id', 'tvrage_id', 'tvmaze_id', 'imdb_id', 'slug', 'sonarr_id']
schema = {
'type': 'object',
'properties': {
'base_url': {'type': 'string'},
'port': {'type': 'number', 'default': 80},
'api_key': {'type': 'string'},
'include_ended': {'type': 'boolean', 'default': True},
'only_monitored': {'type': 'boolean', 'default': True},
'include_data': {'type': 'boolean', 'default': False}
},
'required': ['api_key', 'base_url'],
'additionalProperties': False
}
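    # Illustrative FlexGet task config for this list plugin (values assumed):
    #   sonarr_list:
    #     base_url: http://localhost
    #     port: 8989
    #     api_key: <your-sonarr-api-key>
    #     include_data: yes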
def series_request_builder(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received series list request')
url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def lookup_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received series lookup request')
url = '%s://%s:%s%s/api/series/lookup?term=' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def profile_list_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received profile list request')
url = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def rootfolder_request(self, base_url, port, api_key):
parsedurl = urlparse(base_url)
log.debug('Received rootfolder list request')
url = '%s://%s:%s%s/api/Rootfolder' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
headers = {'X-Api-Key': api_key}
return url, headers
def get_json(self, url, headers):
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.json()
else:
raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
def post_json(self, url, headers, data):
try:
response = requests.post(url, headers=headers, data=data)
if response.status_code == 201:
return response.json()
else:
raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
except RequestException as e:
raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
def request_builder(self, base_url, request_type, port, api_key):
if request_type == 'series':
return self.series_request_builder(base_url, port, api_key)
elif request_type == 'profile':
return self.profile_list_request(base_url, port, api_key)
elif request_type == 'lookup':
return self.lookup_request(base_url, port, api_key)
elif request_type == 'rootfolder':
return self.rootfolder_request(base_url, port, api_key)
else:
raise plugin.PluginError('Received unknown API request, aborting.')
def translate_quality(self, quality_name):
"""
        Translate Sonarr's quality names to ones recognized by Flexget
"""
if quality_name == 'Raw-HD': # No better match yet in Flexget
return 'remux'
elif quality_name == 'DVD': # No better match yet in Flexget
return 'dvdrip'
else:
return quality_name.replace('-', ' ').lower()
def quality_requirement_builder(self, quality_profile):
allowed_qualities = [self.translate_quality(quality['quality']['name']) for quality in quality_profile['items']
if quality['allowed']]
cutoff = self.translate_quality(quality_profile['cutoff']['name'])
return allowed_qualities, cutoff
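    # Illustrative input/output for quality_requirement_builder above (profile
    # shape assumed from Sonarr's API): a profile such as
    #   {'cutoff': {'name': 'HDTV-720p'},
    #    'items': [{'quality': {'name': 'HDTV-720p'}, 'allowed': True},
    #              {'quality': {'name': 'DVD'}, 'allowed': False}]}
    # yields (['hdtv 720p'], 'hdtv 720p').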
def list_entries(self):
series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
self.config.get('port'), self.config['api_key'])
json = self.get_json(series_url, series_headers)
# Retrieves Sonarr's profile list if include_data is set to true
if self.config.get('include_data'):
profile_url, profile_headers = self.request_builder(self.config.get('base_url'), 'profile',
self.config.get('port'),
self.config['api_key'])
profiles_json = self.get_json(profile_url, profile_headers)
entries = []
for show in json:
            fg_qualities = ''  # Initializes the quality parameter
fg_cutoff = ''
path = None
if not show['monitored'] and self.config.get(
'only_monitored'): # Checks if to retrieve just monitored shows
continue
if show['status'] == 'ended' and not self.config.get('include_ended'): # Checks if to retrieve ended shows
continue
            if self.config.get('include_data') and profiles_json:  # Check if to retrieve quality & path
path = show.get('path')
for profile in profiles_json:
if profile['id'] == show['profileId']: # Get show's profile data from all possible profiles
fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
entry = Entry(title=show['title'],
url='',
series_name=show['title'],
tvdb_id=show.get('tvdbId'),
tvrage_id=show.get('tvRageId'),
tvmaze_id=show.get('tvMazeId'),
imdb_id=show.get('imdbid'),
slug=show.get('titleSlug'),
sonarr_id=show.get('id'),
configure_series_target=fg_cutoff)
if len(fg_qualities) > 1:
entry['configure_series_qualities'] = fg_qualities
elif len(fg_qualities) == 1:
entry['configure_series_quality'] = fg_qualities[0]
else:
entry['configure_series_quality'] = fg_qualities
if path:
entry['configure_series_path'] = path
if entry.isvalid():
log.debug('returning entry %s', entry)
entries.append(entry)
else:
log.error('Invalid entry created? %s' % entry)
continue
return entries
def add_show(self, entry):
log.debug('searching for show match for %s using Sonarr', entry)
lookup_series_url, lookup_series_headers = self.request_builder(self.config.get('base_url'), 'lookup',
self.config.get('port'), self.config['api_key'])
if entry.get('tvdb_id'):
lookup_series_url += 'tvdb:%s' % entry.get('tvdb_id')
else:
lookup_series_url += entry.get('title')
lookup_results = self.get_json(lookup_series_url, headers=lookup_series_headers)
if not lookup_results:
log.debug('could not find series match to %s', entry)
return
else:
if len(lookup_
|
Commonists/SurfaceImageContentGap
|
surfaceimagecontentgap/__init__.py
|
Python
|
mit
| 59
| 0
|
""" Surface image content gap. "
|
""
__version__
|
= '1.3-dev'
|
Einsteinish/PyTune3
|
utils/feedfinder.py
|
Python
|
mit
| 13,462
| 0.008394
|
"""feedfinder: Find the Web feed for a Web page
http://www.aaronsw.com/2002/feedfinder/
Usage:
feed(uri) - returns feed found for a URI
feeds(uri) - returns all feeds found for a URI
>>> import feedfinder
>>> feedfinder.feed('scripting.com')
'http://scripting.com/rss.xml'
>>>
>>> feedfinder.feeds('scripting.com')
['http://delong.typepad.com/sdj/atom.xml',
'http://delong.typepad.com/sdj/index.rdf',
'http://delong.typepad.com/sdj/rss.xml']
>>>
Can also use from the command line. Feeds are returned one per line:
$ python feedfinder.py diveintomark.org
http://diveintomark.org/xml/atom.xml
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
6. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
7. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
8. As a last ditch effort, we search Syndic8 for feeds matching the URI
"""
__version__ = "1.371"
__date__ = "2006-04-24"
__maintainer__ = "Aaron Swartz (me@aaronsw.com)"
__author__ = "Mark Pilgrim (http://diveintomark.org)"
__copyright__ = "Copyright 2002-4, Mark Pilgrim; 2006 Aaron Swartz"
__license__ = "Python"
__credits__ = """Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches"""
_debug = 0
import sgmllib, urllib, urlparse, re, sys, robotparser
import requests
from StringIO import StringIO
from lxml import etree
# XML-RPC support allows feedfinder to query Syndic8 for possible matches.
# Python 2.3 now comes with this module by default, otherwise you can download it
try:
import xmlrpclib # http://www.pythonware.com/products/xmlrpc/
except ImportError:
xmlrpclib = None
if not dict:
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
def _debuglog(message):
if _debug: print message
class URLGatekeeper:
"""a class to track robots.txt rules across multiple servers"""
def __init__(self):
self.rpcache = {} # a dictionary of RobotFileParser objects, by domain
self.urlopener = urllib.FancyURLopener()
self.urlopener.version = "PyTune Feed Finder (Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 Safari/534.48.3)"
_debuglog(self.urlopener.version)
self.urlopener.addheaders = [('User-Agent', self.urlopener.version)]
# self.urlopener.addheaders = [('User-Agent', self.urlopener.version), ('Accept', '*')]
robotparser.URLopener.version = self.urlopener.version
robotparser.URLopener.addheaders = self.urlopener.addheaders
def _getrp(self, url):
protocol, domain = urlparse.urlparse(url)[:2]
if self.rpcache.has_key(domain):
return self.rpcache[domain]
baseurl = '%s://%s' % (protocol, domain)
robotsurl = urlparse.urljoin(baseurl, 'robots.txt')
_debuglog('fetching %s' % robotsurl)
rp = robotparser.RobotFileParser(robotsurl)
try:
rp.read()
except:
pass
self.rpcache[domain] = rp
return rp
def can_fetch(self, url):
rp = self._getrp(url)
allow = rp.can_fetch(self.urlopener.version, url)
_debuglog("gatekeeper of %s says %s" % (url, allow))
return allow
def get(self, url, check=False):
if check and not self.can_fetch(url): return ''
try:
return requests.get(url, headers=dict(self.urlopener.addheaders)).content
except:
return ''
_gatekeeper = URLGatekeeper()
class BaseParser(sgmllib.SGMLParser):
def __init__(self, baseuri):
sgmllib.SGMLParser.__init__(self)
self.links = []
self.baseuri = baseuri
def normalize_attrs(self, attrs):
def cleanattr(v):
v = sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v)
if not v: return
v = v.strip()
            v = v.replace('&lt;', '<').replace('&gt;', '>').replace('&apos;', "'").replace('&quot;', '"').replace('&amp;', '&')
return v
attrs = [(k.lower(), cleanattr(v)) for k, v in attrs if cleanattr(v)]
attrs = [(k, k in ('rel','type') and v.lower() or v) for k, v in attrs if cleanattr(v)]
return attrs
def do_base(self, attrs):
attrsD = dict(self.normalize_attrs(attrs))
if not attrsD.has_key('href'): return
self.baseuri = attrsD['href']
def error(self, *a, **kw): pass # we're not picky
class LinkParser(BaseParser):
FEED_TYPES = ('application/rss+xml',
'text/xml',
'application/atom+xml',
'application/x.atom+xml',
'application/x-atom+xml')
def do_link(self, attrs):
attrsD = dict(self.normalize_attrs(attrs))
if not attrsD.has_key('rel'): return
rels = attrsD['rel'].split()
if 'alternate' not in rels: return
if attrsD.get('type') not in self.FEED_TYPES: return
if not attrsD.has_key('href'): return
self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
class ALinkParser(BaseParser):
def start_a(self, attrs):
attrsD = dict(self.normalize_attrs(attrs))
if not attrsD.has_key('href'): return
self.links.append(urlparse.urljoin(self.baseuri, attrsD['href']))
def makeFullURI(uri):
if not uri: return
uri = uri.strip()
if uri.startswith('feed://'):
uri = 'http://' + uri.split('feed://', 1).pop()
for x in ['http', 'https']:
if uri.startswith('%s://' % x):
return uri
return 'http://%s' % uri
def getLinks(data, baseuri):
p = LinkParser(baseuri)
p.feed(data)
return p.links
def getLinksLXML(data, baseuri):
parser = etree.HTMLParser(recover=True)
tree = etree.parse(StringIO(data), parser)
links = []
for link in tree.findall('.//link'):
if link.attrib.get('type') in LinkParser.FEED_TYPES:
href = link.attrib['href']
if href: links.append(href)
return links
def getALinks(data, baseuri):
p = ALinkParser(baseuri)
p.feed(data)
return p.links
def getLocalLinks(links, baseuri):
found_links = []
if not baseuri: return found_links
baseuri = baseuri.lower()
for l in links:
try:
if l.lower().startswith(baseuri):
found_links.append(l)
except (AttributeError, UnicodeDecodeError):
pass
return found_links
def isFeedLink(link):
    return link.lower().endswith(('.rss', '.rdf', '.xml', '.atom'))
def isXMLRelatedLink(link):
link = link.lower()
return link.count('rss') + link.count('rdf') + link.count('xml') + link.count('atom')
r_brokenRedirect = re.compile('<newLocation[^>]*>(.*?)</newLocation>', re.S)
def tryBrokenRedirect(data):
if '<newLocation' in data:
newuris = r_brokenRedirect.findall(data)
if newuris and newuris[0]: return newuris[0].strip()
def couldBeFeedData(data):
data = data.lower()
if data.count('<html'): return 0
return data.count('<rss') + data.count('<rdf') + data.count('<feed')
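# A quick sanity illustration of the heuristic above: any document containing
# an '<html' tag scores 0, while one with an <rss>/<rdf>/<feed> root scores at
# least 1. The strings below are hypothetical examples, added for clarity.
assert couldBeFeedData('<html><body>just a web page</body></html>') == 0
assert couldBeFeedData('<?xml version="1.0"?><rss version="2.0"></rss>') >= 1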
def isFeed(uri):
_debuglog('seeing if %s is a feed' % uri)
protocol = urlparse.urlparse(uri)
    if protocol[0] not in ('http', 'https'): return 0
try:
data = _gatekeeper.get(uri, check=False)
except (KeyError, UnicodeDecodeError):
return False
count = couldBeFeedData(data)
return count
def sortFeeds(feed1Info, feed2Info):
    return cmp(feed2Info['headlines_rank'], feed1Info['headlines_rank'])
|
shucommon/little-routine
|
python/AI/opencv/gui/mouse.py
|
Python
|
gpl-3.0
| 477
| 0.035639
|
import numpy as np
import cv2 as cv
# mouse callback function
def draw_circle(event,x,y,flags,param):
|
if event == cv.EVENT_LBUTTONDBLCLK:
cv.circle(img,(x,y),50,(255,0,0),-1)
# Create a black image, a window and bind the function to window
img = np.zeros((512,512,3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image',draw_circle)
while(1):
cv.imshow('image',img)
# esc
    if cv.waitKey(20) & 0xFF == 27:
break
cv.destroyAllWindows()
|
yochow/autotest
|
client/tests/cpuset_tasks/cpuset_tasks.py
|
Python
|
gpl-2.0
| 939
| 0.003195
|
import os, time
import subprocess
from autotest_lib.client.bin import test
from autotest_lib.client.common_lib import utils, error
class cpuset_tasks(test.test):
version = 1
    preserve_srcdir = True
def initialize(self):
self.job.require_gcc()
def setup(self):
os.chdir(self.srcdir)
utils.system('make')
def execute(self):
os.chdir(self.tmpdir)
tasks_bin = os.path.join(self.srcdir, 'tasks')
p = subprocess.Popen([tasks_bin, ' 25000'])
time.sleep(5)
try:
            result = utils.run('cat /dev/cpuset/autotest_container/tasks',
ignore_status=True)
except IOError:
utils.nuke_subprocess(p)
raise error.TestFail('cat cpuset/tasks failed with IOError')
utils.nuke_subprocess(p)
if result and result.exit_status:
raise error.TestFail('cat cpuset/tasks failed')
|
jlmdegoede/Invoicegen
|
hour_registration/tests.py
|
Python
|
gpl-3.0
| 11,185
| 0.003934
|
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
import datetime
from .models import *
from django.utils import timezone
from django.contrib.auth.models import Group, ContentType, Permission
from django.db.models import Max
# Create your tests here.
class HourRegistrationTestClass(TestCase):
def setUp(self):
now = timezone.now()
next_week = now + datetime.timedelta(7)
self.company = Company.objects.create(company_name='Testbedrijf', company_address='Testadres',
company_city_and_zipcode='Testplaats 1234AB')
self.order_one = Product.objects.create(title='Testopdracht 1', date_received=now, date_deadline=next_week,
quantity=1000,
from_company=self.company, identification_number=1, briefing='Test',
price_per_quantity=0.25, tax_rate=21)
self.order_two = Product.objects.create(title='Testopdracht 2', date_received=now, date_deadline=next_week,
quantity=700,
from_company=self.company, identification_number=1, briefing='Test',
price_per_quantity=0.22, tax_rate=0)
self.c = Client()
group = Group.objects.create(name='Urenregistratie')
content_type = ContentType.objects.get(model='hourregistration')
all_permissions = Permission.objects.filter(content_type=content_type)
group.permissions.set(all_permissions)
group.save()
self.user = User.objects.create_user(username='testuser', email='test@test.nl', password='secret')
self.user.groups.add(group)
self.user.save()
def test_start_time_tracking_login(self):
response = self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
self.assertEqual(response.status_code, 302)
self.assertTrue('/accounts/login/?next=/' in response.url)
def test_start_time_tracking(self):
self.c.login(username='testuser', password='secret')
response = self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
self.assertEqual(response.status_code, 200)
str(response.content, encoding='utf8')
self.assertContains(response, 'success')
def test_start_time_tracking_existing(self):
        self.c.login(username='testuser', password='secret')
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
response = self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
self.assertEqual(response.status_code, 200)
response_str = str(response.content, encoding='utf8')
self.assertTrue(str(self.order_one.id) in response_str)
self.assertContains(response, 'pk')
self.assertContains(response, self.order_one.title)
|
def test_end_time_tracking(self):
self.c.login(username='testuser', password='secret')
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
response_end = self.c.get(reverse('end_time_tracking', kwargs={'product_id': self.order_one.id}))
self.assertContains(response_end, 'success')
hour_registration = HourRegistration.objects.filter(product=self.order_one, end=None)
self.assertTrue(hour_registration.count() == 0)
def test_end_time_tracking_login(self):
response = self.c.get(reverse('end_time_tracking', kwargs={'product_id': self.order_one.id}))
self.assertEqual(response.status_code, 302)
self.assertTrue('/accounts/login/?next=/' in response.url)
def test_add_description_to_hourregistration(self):
self.c.login(username='testuser', password='secret')
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
test_description = 'Testomschrijving'
response = self.c.post(reverse('add_description_to_hourregistration'), data={'description': test_description, 'product_id': self.order_one.id})
self.assertContains(response, 'success')
h_registration = HourRegistration.objects.filter(product=self.order_one, end=None, description=test_description)
self.assertTrue(h_registration.count() != 0)
def test_get_description_to_hourregistration(self):
self.c.login(username='testuser', password='secret')
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
test_description = 'Testomschrijving'
self.c.post(reverse('add_description_to_hourregistration'), data={'description': test_description, 'product_id': self.order_one.id})
response_get = self.c.get(reverse('add_description_to_hourregistration'), data={'product_id': self.order_one.id})
self.assertContains(response_get, test_description)
def test_get_description_multiple_hrs(self):
self.c.login(username='testuser', password='secret')
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
self.c.get(reverse('end_time_tracking', kwargs={'product_id': self.order_one.id}))
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
self.c.get(reverse('end_time_tracking', kwargs={'product_id': self.order_one.id}))
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
test_description = 'Testomschrijving'
self.c.post(reverse('add_description_to_hourregistration'), data={'description': test_description, 'product_id': self.order_one.id})
response = self.c.get(reverse('add_description_to_hourregistration'), data={'product_id': self.order_one.id})
self.assertContains(response, test_description)
def test_add_description_to_hourregistration_login(self):
response = self.c.post(reverse('add_description_to_hourregistration'), data={'description': 'Testomschrijving', 'product_id': self.order_one.id})
self.assertEqual(response.status_code, 302)
self.assertTrue('/accounts/login/?next=/' in response.url)
def test_existing_time_tracking(self):
self.c.login(username='testuser', password='secret')
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
response = self.c.get(reverse('existing_time_tracking'))
response_str = str(response.content, encoding='utf8')
self.assertTrue(str(self.order_one.id) in response_str)
self.assertContains(response, 'pk')
self.assertContains(response, self.order_one.title)
def test_multiple_existing_time_tracking(self):
self.c.login(username='testuser', password='secret')
# order is important
# existing is expected to return the first one started
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_one.id}))
self.c.get(reverse('start_time_tracking', kwargs={'product_id': self.order_two.id}))
response = self.c.get(reverse('existing_time_tracking'))
response_str = str(response.content, encoding='utf8')
self.assertTrue(str(self.order_one.id) in response_str)
self.assertContains(response, 'pk')
self.assertContains(response, self.order_one.title)
def test_none_existing_time_tracking(self):
self.c.login(username='testuser', password='secret')
response = self.c.get(reverse('existing_time_tracking'))
self.assertContains(response, 'existing')
self.assertContains(response, 'False')
def test_existing_time_tracking_login(self):
response = self.c.get(reverse('existing_time_tracking'))
self.assertEqual(response.status_code, 302)
self.assertTrue('/accounts/login/?next=/' in response.url)
def test_delete_hourregistration(self):
        self.c.login(username='testuser', password='secret')
|
jaeilepp/eggie
|
ui/preprocessDialog.py
|
Python
|
bsd-2-clause
| 23,303
| 0.00515
|
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignal
from PyQt4.Qt import QApplication, QSettings, pyqtSlot
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
import sys
from threading import Thread, Event
from general.caller import Caller
from ui_PreprocessDialog import Ui_MainWindow
from ui.ui_InfoDialog import Ui_infoDialog
from ui.InfoDialog import InfoDialog
from ui.projectionDialog import ProjectionDialog
from ui.channelSelectionDialog import ChannelSelectionDialog
from ui.saveDialog import SaveDialog
from ui.powerSpectrumWidget import PowerSpectrumWidget
from ui.groupSpectrumDialog import GroupSpectrumDialog
from ui.fileSelectionDialog import FileSelectionDialog
from ui.timeSeriesDialog import TimeSeriesDialog
class PreprocessDialog(QtGui.QMainWindow):
"""
"""
e = Event()
filterFinished = pyqtSignal()
caller = Caller.Instance()
conditions = []
settings = QSettings("CIBR", "Eggie")
def __init__(self):
"""
Init method for the preprocessing dialog.
Redirects stdout to dialog's console.
"""
QtGui.QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.progressBar.setVisible(False)
self.installEventFilters()
self.ui.tableWidgetEvents.setSortingEnabled(False)
#Signals
self.ui.tableWidgetEvents.currentItemChanged.connect(self.\
on_currentChanged)
self.ui.checkBoxLowCutOff.stateChanged.connect(self.on_StateChanged)
self.ui.checkBoxHighCutOff.stateChanged.connect(self.on_StateChanged)
self.ui.checkBoxNotch.stateChanged.connect(self.on_StateChanged)
self.ui.doubleSpinBoxLowCutOff.valueChanged.connect(self.\
ui.pushButtonFilter.setEnabled)
self.ui.doubleSpinBoxHighCutOff.valueChanged.connect(self.\
ui.pushButtonFilter.setEnabled)
self.ui.doubleSpinBoxNotchFilter.valueChanged.connect(self.\
ui.pushButtonFilter.setEnabled)
self.ui.actionDirectOutput.triggered.connect(self.directOutput)
self.ui.actionOpen.triggered.connect(self.on_actionOpen)
self.ui.actionExit.triggered.connect(self.on_actionExit)
sys.stdout = EmittingStream(textWritten=self.normalOutputWritten)
sys.stderr = EmittingStream(textWritten=self.errorOutputWritten)
self.on_actionOpen()
self.ui.lineEditLayout.setText(self.settings.value("Layout", "").\
toString())
def initialize(self):
"""
Method for initializing the dialog.
"""
if not self.caller.raw: return
self.ui.labelRaw.setText(self.caller.raw.info.get('filename'))
self.ui.comboBoxChannelSelect.addItems(self.caller.raw.info.\
get('ch_names'))
index = self.ui.comboBoxChannelSelect.findText('17')
self.ui.comboBoxChannelSelect.setCurrentIndex(index)
self.ui.pushButtonFilter.setEnabled(True)
self.ui.tableWidgetEvents.setSelectionBehavior(1)
self.ui.tableWidgetEvents.setColumnCount(4)
self.ui.tableWidgetEvents.setHorizontalHeaderLabels(["Time (s)",
"Sample",
"Prev. id",
"Current id"])
tmax = np.floor(self.caller.raw.index_as_time(self.caller.raw.n_times))# / 1000.0))
        if len(self.conditions) == 0:
spectrumWidget = PowerSpectrumWidget(tmax, self)
spectrumWidget.index = 0
spectrumWidget.removeWidget.connect(self.on_RemoveWidget_clicked)
spectrumWidget.channelCopy.connect(self.copyChannels)
self.ui.verticalLayoutConditions.addWidget(spectrumWidget)
self.conditions.append(spectrumWidget)
def directOutput(self):
"""
        Method for directing stdout to the console and back.
"""
if self.ui.actionDirectOutput.isChecked():
sys.stdout = EmittingStream(textWritten=self.normalOutputWritten)
sys.stderr = EmittingStream(textWritten=self.errorOutputWritten)
else:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def on_actionOpen(self):
"""
Opens a dialog for selecting a file.
"""
fileDialog = FileSelectionDialog(self)
fileDialog.fileChanged.connect(self.on_FileChanged)
fileDialog.show()
def on_actionExit(self):
"""
Closes the main window.
"""
self.close()
def on_pushButtonFilter_clicked(self, checked=None):
"""
Called when filter button is clicked.
"""
if checked is None or not self.caller.raw: return
self.ui.pushButtonFilter.setEnabled(False)
QtGui.QApplication.setOverrideCursor(QtGui.\
QCursor(QtCore.Qt.WaitCursor))
self.e.clear()
self.thread = Thread(target = self.callFilter)
self.thread.start()
#while (self.thread.is_alive()):
# time.sleep(1)
#self.e.wait()
self.on_FilterFinished()
QtGui.QApplication.restoreOverrideCursor()
def callFilter(self):
"""
A function for calling filtering function in mne.
Performed in a worker thread.
"""
if self.ui.checkBoxHighCutOff.isChecked():
highCutOff = self.ui.doubleSpinBoxHighCutOff.value()
else:
highCutOff = None
if self.ui.checkBoxLowCutOff.isChecked():
lowCutOff = self.ui.doubleSpinBoxLowCutOff.value()
else:
lowCutOff = None
try:
self.caller.raw.filter(l_freq=lowCutOff, h_freq=highCutOff,
n_jobs=2)
except Exception as e:
print str(e)
self.e.set()
return
print self.caller.raw.info['bads']
if self.ui.checkBoxNotch.isChecked():
print "Applying notch filter...\n"
notchFreq = self.ui.doubleSpinBoxNotchFilter.value()
try:
self.caller.raw.notch_filter(freqs=notchFreq)
except Exception as e:
print str(e)
self.e.set()
return
print self.caller.raw.info['bads']
print "Launching mne_browse_raw...\n"
self.e.set()
def on_FilterFinished(self):
"""
Function for adding bad channels via mne_browse_raw.
"""
while not (self.e.is_set()):
sleep(0.5)
self.updateUi()
if self.e.is_set(): break
self.caller.raw.plot(scalings=dict(eeg=40e-6))
plt.show()
self.ui.listWidgetBads.clear()
self.ui.listWidgetBads.addItems(self.caller.raw.info['bads'])
self.e.clear()
print "Finished\n"
self.ui.pushButtonSave.setEnabled(True)
def on_pushButtonAddBads_clicked(self, checked=None):
"""
Called as the add-button is clicked.
Opens a dialog for adding bad channels by hand.
"""
if checked is None or not self.caller.raw: return
badsDialog = ChannelSelectionDialog(self.caller.raw.info['bads'],
"Select bad channels:")
badsDialog.channelsChanged.connect(self.on_BadsChanged)
badsDialog.exec_()
def on_pushButtonFindEogEvents_clicked(self, checked=None):
"""
Finds EOG-events from the raw data.
Called when find eog events -button is clicked.
"""
if checked is None or not self.caller.raw: return
        QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
|
dreibh/planetlab-lxc-plcapi
|
PLC/Methods/GetSession.py
|
Python
|
bsd-3-clause
| 1,423
| 0.001405
|
import time
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Auth import Auth
from PLC.Sessions import Session, Sessions
from PLC.Nodes import Node, Nodes
from PLC.Persons import Person, Persons
class GetSession(Method):
"""
Returns a new session key if a user or node authenticated
successfully, faults otherwise.
Default value for 'expires' is 24 hours. Otherwise, the returned
session 'expires' in the given number of seconds.
"""
roles = ['admin', 'pi', 'user', 'tech', 'node']
accepts = [Auth(),
Parameter(int, "expires", nullok=True)]
returns = Session.fields['session_id']
def call(self, auth, expires=None):
        # Authenticated with a session key, just return it
if 'session' in auth:
return auth['session']
session = Session(self.api)
if isinstance(self.caller, Person):
# XXX Make this configurable
if expires is None:
session['expires'] = int(time.time()) + (24 * 60 * 60)
else:
                session['expires'] = int(time.time()) + int(expires)
session.sync(commit=False)
if isinstance(self.caller, Node):
session.add_node(self.caller, commit=True)
elif isinstance(self.caller, Person):
session.add_person(self.caller, commit=True)
return session['session_id']
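# Rough client-side sketch of how this method is typically reached over
# PLCAPI's XML-RPC interface (an assumption for illustration only; the
# endpoint URL, credentials and exact auth-struct fields are hypothetical and
# not taken from this file):
#
#   import xmlrpclib
#   api = xmlrpclib.ServerProxy('https://plc.example.org/PLCAPI/')
#   auth = {'AuthMethod': 'password',
#           'Username': 'user@example.org',
#           'AuthString': 'secret'}
#   session_id = api.GetSession(auth, 3600)   # session expires in one hour
#   session_auth = {'AuthMethod': 'session', 'session': session_id}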
|
seisman/HinetPy
|
tests/localtest_client_multi_threads.py
|
Python
|
mit
| 289
| 0
|
import os
from datetime import datetime
from HinetPy import Client
username = os.environ["HINET_USERNAME"]
password = os.environ["HINET_PASSWORD"]
client = Client(username, password)
starttime = datetime(2017, 1, 1, 0, 0)
client.get_continuous_waveform("0101", starttime, 20, threads=4)
|
urbn/kombu
|
t/unit/asynchronous/test_timer.py
|
Python
|
bsd-3-clause
| 4,311
| 0
|
from __future__ import absolute_import, unicode_literals
import pytest
from datetime import datetime
from case import Mock, patch
from kombu.asynchronous.timer import Entry, Timer, to_timestamp
from kombu.five import bytes_if_py2
class test_to_timestamp:
def test_timestamp(self):
assert to_timestamp(3.13) == 3.13
def test_datetime(self):
assert to_timestamp(datetime.utcnow())
class test_Entry:
def test_call(self):
fun = Mock(name='fun')
tref = Entry(fun, (4, 4), {'moo': 'baz'})
tref()
fun.assert_called_with(4, 4, moo='baz')
def test_cancel(self):
tref = Entry(lambda x: x, (1,), {})
assert not tref.canceled
assert not tref.cancelled
tref.cancel()
assert tref.canceled
assert tref.cancelled
def test_repr(self):
tref = Entry(lambda x: x(1,), {})
assert repr(tref)
def test_hash(self):
assert hash(Entry(lambda: None))
def test_ordering(self):
# we don't care about results, just that it's possible
Entry(lambda x: 1) < Entry(lambda x: 2)
Entry(lambda x: 1) > Entry(lambda x: 2)
Entry(lambda x: 1) >= Entry(lambda x: 2)
Entry(lambda x: 1) <= Entry(lambda x: 2)
def test_eq(self):
x = Entry(lambda x: 1)
y = Entry(lambda x: 1)
assert x == x
assert x != y
class test_Timer:
def test_enter_exit(self):
x = Timer()
x.stop = Mock(name='timer.stop')
with x:
pass
x.stop.assert_called_with()
def test_supports_Timer_interface(self):
x = Timer()
x.stop()
tref = Mock()
x.cancel(tref)
tref.cancel.assert_called_with()
assert x.schedule is x
def test_handle_error(self):
from datetime import datetime
on_error = Mock(name='on_error')
s = Timer(on_error=on_error)
with patch('kombu.asynchronous.timer.to_timestamp') as tot:
tot.side_effect = OverflowError()
s.enter_at(Entry(lambda: None, (), {}),
eta=datetime.now())
s.enter_at(Entry(lambda: None, (), {}), eta=None)
s.on_error = None
with pytest.raises(OverflowError):
s.enter_at(Entry(lambda: None, (), {}),
eta=datetime.now())
on_error.assert_called_once()
exc = on_error.call_args[0][0]
assert isinstance(exc, OverflowError)
def test_call_repeatedly(self):
t = Timer()
try:
t.schedule.enter_after = Mock()
myfun = Mock()
myfun.__name__ = bytes_if_py2('myfun')
t.call_repeatedly(0.03, myfun)
assert t.schedule.enter_after.call_count == 1
args1, _ = t.schedule.enter_after.call_args_list[0]
sec1, tref1, _ = args1
assert sec1 == 0.03
tref1()
            assert t.schedule.enter_after.call_count == 2
args2, _ = t.schedule.enter_after.call_args_list[1]
sec2, tref2, _ = args2
assert sec2 == 0.03
tref2.canceled = True
tref2()
assert t.schedule.enter_after.call_count == 2
finally:
t.stop()
    @patch('kombu.asynchronous.timer.logger')
def test_apply_entry_error_handled(self, logger):
t = Timer()
t.schedule.on_error = None
fun = Mock()
fun.side_effect = ValueError()
t.schedule.apply_entry(fun)
logger.error.assert_called()
def test_apply_entry_error_not_handled(self, stdouts):
t = Timer()
t.schedule.on_error = Mock()
fun = Mock()
fun.side_effect = ValueError()
t.schedule.apply_entry(fun)
fun.assert_called_with()
assert not stdouts.stderr.getvalue()
def test_enter_after(self):
t = Timer()
t._enter = Mock()
fun = Mock(name='fun')
time = Mock(name='time')
time.return_value = 10
t.enter_after(10, fun, time=time)
time.assert_called_with()
t._enter.assert_called_with(20, 0, fun)
def test_cancel(self):
t = Timer()
tref = Mock()
t.cancel(tref)
tref.cancel.assert_called_with()
|
pmacosta/pexdoc
|
docs/support/pcontracts_example_3.py
|
Python
|
mit
| 2,517
| 0.000397
|
# pcontracts_example_3.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0410,W0613,W0702
from __future__ import print_function
import os, pexdoc.pcontracts
@pexdoc.pcontracts.new_contract(ex1=(RuntimeError, "Invalid name"))
def custom_contract1(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(ex1=("Invalid name", RuntimeError))
def custom_contract2(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(ex1=ValueError)
def custom_contract3(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(
ex1=(ValueError, "Argument `*[argument_name]*` is not valid")
)
def custom_contract4(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(ex1="Invalid name")
def custom_contract5(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(ex1=("Invalid name", RuntimeError))
def custom_contract6(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract((OSError, "File could not be opened"))
def custom_contract7(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract("Invalid name")
def custom_contract8(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(TypeError)
def custom_contract9(arg):
if not arg:
        raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract()
def custom_contract10(arg):
if not arg:
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.new_contract(
(TypeError, "Argument `*[argument_name]*` has to be a string")
)
def custom_contract11(city):
if not isinstance(city, str):
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.contract(city_name="custom_contract11")
def print_city_name(city_name):
return "City: {0}".format(city_name)
@pexdoc.pcontracts.new_contract((OSError, "File `*[fname]*` not found"))
def custom_contract12(fname):
if not os.path.exists(fname):
raise ValueError(pexdoc.pcontracts.get_exdesc())
@pexdoc.pcontracts.contract(fname="custom_contract12")
def print_fname(fname):
print("File name to find: {0}".format(fname))
|
lovelysystems/pyjamas
|
examples/timesheet/ApplicationConstants.py
|
Python
|
apache-2.0
| 951
| 0.023134
|
# vim: set ts=4 sw=4 expandtab:
class Notification(object):
STARTUP = "startup"
SHOW_DIALOG = "showDialog"
HELLO = "hello"
    # Menu
MENU_FILE_OPEN = "menuFileOpen"
MENU_FILE_SAVEAS = "menuFileSaveAs"
MENU_FILE_PREFS = "menuFilePreferences"
MENU_VIEW_EDIT = "menuViewEdit"
MENU_VIEW_SUM = "menuViewS
|
ummary"
MENU_HELP_CONTENTS = "menuHelpContents"
MENU_HELP_ABOUT = "menuHelpAbout"
FILE_LOADED = "fileLoaded"
EDIT_SELECTED = "editMode"
SUM_SELECTED = "summaryMode"
# Date picker
DISPLAY_DAY = "displayDay"
PREV_DAY = "previousDay"
NEXT_DAY = "nextDay"
PREV_WEEK = "previousWeek"
NEXT_WEEK = "nextWeek"
DATE_SELECTED = "dateSelected"
# Time Grid
CELL_SELECTED = "cellSelected"
CELL_UPDATED = "cellUpdated"
|
KrzysztofMadejski/volontulo
|
apps/volontulo/utils.py
|
Python
|
mit
| 2,269
| 0
|
# -*- coding: utf-8 -*-
u"""
.. module:: utils
"""
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.utils.text import slugify
from apps.volontulo.models import UserProfile
# Offers statuses dictionary with meaningful names.
# todo: remove dependency
OFFERS_STATUSES = {
'NEW': u"Nowa",
'ACTIVE': u"Aktywna",
'FINISHED': u"Zakończona",
'SUSPENDED': u"Zawieszona",
'CLOSED': u"Zamknięta",
}
def get_administrators_emails():
u"""Get all administrators emails or superuser email
Format returned:
emails = {
1: 'admin1@example.com',
2: 'admin2@example.com',
}
"""
administrators = UserProfile.objects.filter(is_administrator=True)
emails = {}
for admin in administrators:
emails[str(admin.user.id)] = admin.user.email
if not emails:
administrators = User.objects.filter(is_superuser=True)
for admin in administrators:
emails[str(admin.id)] = admin.email
return emails
def save_history(req, obj, action):
u"""Save model change
|
s history."""
    LogEntry.objects.log_action(
user_id=req.user.pk,
content_type_id=ContentType.objects.get_for_model(obj).pk,
object_id=obj.pk,
object_repr=str(obj),
action_flag=action
)
def correct_slug(model_class, view_name, slug_field):
u"""Decorator that is reposponsible for redirect to url with correct slug.
It is used by url for offers, organizations and users.
"""
def decorator(wrapped_func):
u"""Decorator function for correcting slugs."""
def wrapping_func(request, slug, id_):
u"""Wrapping function for correcting slugs."""
obj = get_object_or_404(model_class, id=id_)
if slug != slugify(getattr(obj, slug_field)):
return redirect(
view_name,
slug=slugify(getattr(obj, slug_field)),
id_=id_
)
return wrapped_func(request, slug, id_)
return wrapping_func
return decorator
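# Usage sketch for the decorator above (the view name, model and slug field
# below are hypothetical, added only for illustration):
#
#   @correct_slug(Offer, 'offers_view', 'title')
#   def offers_view(request, slug, id_):
#       ...
#
# A request whose slug does not match slugify(offer.title) is then redirected
# to the canonical URL before the wrapped view runs.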
|
racheliel/My-little-business
|
MyLittleBuisness/web/pages/page.py
|
Python
|
mit
| 648
| 0.018519
|
from google.appengine.ext.webapp import template
from models.user import User
from models.page import Page
import webapp2
import json
class PageHandler(webapp2.RequestHandler):
    def get(self, page_id):
template_params = {}
user = None
if self.request.cookies.get('session'):
            user = User.checkToken(self.request.cookies.get('session'))
if not user:
self.redirect('/')
        page = Page.getPageUser(user, page_id)
if page:
html = template.render("web/templates/page.html", template_params)
self.response.write(html)
app = webapp2.WSGIApplication([
('/pages/(.*)', PageHandler),
], debug=True)
|
lpsinger/astropy
|
astropy/coordinates/tests/test_angles.py
|
Python
|
bsd-3-clause
| 35,759
| 0.001064
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initalization and other aspects of Angle and subclasses"""
import threading
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.errors import (
IllegalSecondError, IllegalMinuteError, IllegalHourError,
IllegalSecondWarning, IllegalMinuteWarning)
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_create_angles():
"""
Tests creating and accessing Angle objects
"""
''' The "angle" is a fundamental object. The internal
representation is stored in radians, but this is transparent to the user.
Units *must* be specified rather than a default value be assumed. This is
as much for self-documenting code as anything else.
Angle objects simply represent a single angular coordinate. More specific
angular coordinates (e.g. Longitude, Latitude) are subclasses of Angle.'''
a1 = Angle(54.12412, unit=u.degree)
a2 = Angle("54.12412", unit=u.degree)
a3 = Angle("54:07:26.832", unit=u.degree)
a4 = Angle("54.12412 deg")
a5 = Angle("54.12412 degrees")
a6 = Angle("54.12412°") # because we like Unicode
a7 = Angle((54, 7, 26.832), unit=u.degree)
a8 = Angle("54°07'26.832\"")
# (deg,min,sec) *tuples* are acceptable, but lists/arrays are *not*
# because of the need to eventually support arrays of coordinates
a9 = Angle([54, 7, 26.832], unit=u.degree)
assert_allclose(a9.value, [54, 7, 26.832])
assert a9.unit is u.degree
a10 = Angle(3.60827466667, unit=u.hour)
a11 = Angle("3:36:29.7888000120", unit=u.hour)
a12 = Angle((3, 36, 29.7888000120), unit=u.hour) # *must* be a tuple
# Regression test for #5001
a13 = Angle((3, 36, 29.7888000120), unit='hour')
Angle(0.944644098745, unit=u.radian)
with pytest.raises(u.UnitsError):
Angle(54.12412)
# raises an exception because this is ambiguous
with pytest.raises(u.UnitsError):
Angle(54.12412, unit=u.m)
with pytest.raises(ValueError):
Angle(12.34, unit="not a unit")
a14 = Angle("03h36m29.7888000120") # no trailing 's', but unambiguous
a15 = Angle("5h4m3s") # single digits, no decimal
assert a15.unit == u.hourangle
a16 = Angle("1 d")
a17 = Angle("1 degree")
assert a16.degree == 1
assert a17.degree == 1
a18 = Angle("54 07.4472", unit=u.degree)
a19 = Angle("54:07.4472", unit=u.degree)
a20 = Angle("54d07.4472m", unit=u.degree)
a21 = Angle("3h36m", unit=u.hour)
a22 = Angle("3.6h", unit=u.hour)
a23 = Angle("- 3h", unit=u.hour)
a24 = Angle("+ 3h", unit=u.hour)
# ensure the above angles that should match do
assert a1 == a2 == a3 == a4 == a5 == a6 == a7 == a8 == a18 == a19 == a20
assert_allclose(a1.radian, a2.radian)
assert_allclose(a2.degree, a3.degree)
assert_allclose(a3.radian, a4.radian)
assert_allclose(a4.radian, a5.radian)
assert_allclose(a5.radian, a6.radian)
assert_allclose(a6.radian, a7.radian)
assert_allclose(a10.degree, a11.degree)
assert a11 == a12 == a13 == a14
assert a21 == a22
assert a23 == -a24
# check for illegal ranges / values
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.degree)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.degree)
with pytest.raises(IllegalSecondError):
a = Angle("12 32 99", unit=u.hour)
with pytest.raises(IllegalMinuteError):
a = Angle("12 99 23", unit=u.hour)
with pytest.raises(IllegalHourError):
a = Angle("99 25 51.0", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12 25 51.0xxx", unit=u.hour)
with pytest.raises(ValueError):
a = Angle("12h34321m32.2s")
assert a1 is not None
def test_angle_from_view():
q = np.arange(3.) * u.deg
a = q.view(Angle)
assert type(a) is Angle
assert a.unit is q.unit
assert np.all(a == q)
q2 = np.arange(4) * u.m
with pytest.raises(u.UnitTypeError):
q2.view(Angle)
def test_angle_ops():
"""
Tests operations on Angle objects
"""
# Angles can be added and subtracted. Multiplication and division by a
# scalar is also permitted. A negative operator is also valid. All of
# these operate in a single dimension. Attempting to multiply or divide two
# Angle objects will return a quantity. An exception will be raised if it
# is attempted to store output with a non-angular unit in an Angle [#2718].
a1 = Angle(3.60827466667, unit=u.hour)
a2 = Angle("54:07:26.832", unit=u.degree)
a1 + a2 # creates new Angle object
a1 - a2
-a1
assert_allclose((a1 * 2).hour, 2 * 3.6082746666700003)
assert abs((a1 / 3.123456).hour - 3.60827466667 / 3.123456) < 1e-10
# commutativity
assert (2 * a1).hour == (a1 * 2).hour
a3 = Angle(a1) # makes a *copy* of the object, but identical content as a1
assert_allclose(a1.radian, a3.radian)
assert a1 is not a3
a4 = abs(-a1)
assert a4.radian == a1.radian
a5 = Angle(5.0, unit=u.hour)
assert a5 > a1
assert a5 >= a1
assert a1 < a5
assert a1 <= a5
# check operations with non-angular result give Quantity.
a6 = Angle(45., u.degree)
a7 = a6 * a5
assert type(a7) is u.Quantity
    # but those with angular result yield Angle.
# (a9 is regression test for #5327)
a8 = a1 + 1.*u.deg
assert type(a8) is Angle
a9 = 1.*u.deg + a1
assert type(a9) is Angle
with pytest.raises(TypeError):
a6 *= a5
with pytest.raises(TypeError):
a6 *= u.m
with pytest.raises(TypeError):
np.sin(a6, out=a6)
def test_angle_methods():
# Most methods tested as part of the Quantity tests.
    # A few tests here which caused problems before: #8368
a = Angle([0., 2.], 'deg')
a_mean = a.mean()
assert type(a_mean) is Angle
assert a_mean == 1. * u.degree
a_std = a.std()
assert type(a_std) is Angle
assert a_std == 1. * u.degree
a_var = a.var()
assert type(a_var) is u.Quantity
assert a_var == 1. * u.degree ** 2
a_ptp = a.ptp()
assert type(a_ptp) is Angle
assert a_ptp == 2. * u.degree
a_max = a.max()
assert type(a_max) is Angle
assert a_max == 2. * u.degree
a_min = a.min()
assert type(a_min) is Angle
assert a_min == 0. * u.degree
def test_angle_convert():
"""
Test unit conversion of Angle objects
"""
angle = Angle("54.12412", unit=u.degree)
assert_allclose(angle.hour, 3.60827466667)
assert_allclose(angle.radian, 0.944644098745)
assert_allclose(angle.degree, 54.12412)
assert len(angle.hms) == 3
assert isinstance(angle.hms, tuple)
assert angle.hms[0] == 3
assert angle.hms[1] == 36
assert_allclose(angle.hms[2], 29.78879999999947)
# also check that the namedtuple attribute-style access works:
assert angle.hms.h == 3
assert angle.hms.m == 36
assert_allclose(angle.hms.s, 29.78879999999947)
assert len(angle.dms) == 3
assert isinstance(angle.dms, tuple)
assert angle.dms[0] == 54
assert angle.dms[1] == 7
assert_allclose(angle.dms[2], 26.831999999992036)
# also check that the namedtuple attribute-style access works:
assert angle.dms.d == 54
assert angle.dms.m == 7
assert_allclose(angle.dms.s, 26.831999999992036)
assert isinstance(angle.dms[0], float)
assert isinstance(angle.hms[0], float)
# now make sure dms and signed_dms work right for negative angles
negangle = Angle("-54.12412", unit=u.degree)
assert negangle.dms.d == -54
assert negangle.dms.m == -7
assert_allclose(negangle.dms.s, -26.831999999992036)
assert negangle.signed_dms.sign == -1
assert negangle.signed_dms.d == 54
assert negangle.signed_dms.m == 7
assert_allclose(negangle.signed_dms.s, 26.831999999992036)
def test_angle_formatting():
"""
Tests string formatting
|
stoqs/stoqs
|
stoqs/loaders/CANON/loadCN13ID_october2013.py
|
Python
|
gpl-3.0
| 11,697
| 0.010088
|
#!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'mccann at mbari.org'
__doc__ = '''
Master loader for all Worden's CN13ID Western Flyer cruise in October 2013
CN13ID: CANON 2013 Interdisciplinary
Mike McCann
MBARI 23 October 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
import time # for startdate, enddate args
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE']='config.settings.local'
project_dir = os.path.dirname(__file__)
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
import timing
# building input data sources object
cl = CANONLoader('stoqs_cn13id_oct2013', 'CN13ID - October 2013',
                  description = 'Warden cruise on Western Flyer into the California Current System off Monterey Bay',
x3dTerrains = {
                    'https://stoqs.mbari.org/x3d/Globe_1m_bath_10x/Globe_1m_bath_10x_scene.x3d': {
'position': '14051448.48336 -15407886.51486 6184041.22775',
'orientation': '0.83940 0.33030 0.43164 1.44880',
'centerOfRotation': '0 0 0',
'VerticalExaggeration': '10',
}
},
grdTerrain = os.path.join(parentDir, 'Globe_1m_bath.grd')
)
# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
startdate = datetime.datetime(2013, 10, 6) # Fixed start
enddate = datetime.datetime(2013, 10, 18) # Fixed end
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
#####################################################################
# DORADO
#####################################################################
# special location for dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2013/netcdf/'
cl.dorado_files = [
'Dorado389_2013_280_01_280_01_decim.nc',
'Dorado389_2013_282_00_282_00_decim.nc',
'Dorado389_2013_283_00_283_00_decim.nc',
'Dorado389_2013_287_01_287_01_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList',
'roll', 'pitch', 'yaw',
]
######################################################################
# GLIDERS
######################################################################
# SPRAY glider - for just the duration of the campaign
cl.l_662_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line66/'
cl.l_662_files = ['OS_Glider_L_662_20130711_TS.nc']
cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2']
cl.l_662_startDatetime = startdate
cl.l_662_endDatetime = enddate
######################################################################
# WESTERN FLYER: October 6-17
######################################################################
# UCTD
cl.wfuctd_base = cl.dodsBase + 'CANON_october2013/Platforms/Ships/Western_Flyer/uctd/'
cl.wfuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.wfuctd_files = [
'CN13IDm01.nc', 'CN13IDm02.nc', 'CN13IDm03.nc', 'CN13IDm04.nc', 'CN13IDm05.nc', 'CN13IDm06.nc', 'CN13IDm07.nc', 'CN13IDm08.nc', 'CN13IDm09.nc', 'CN13IDm10.nc',
'CN13IDm11.nc', 'CN13IDm12.nc', 'CN13IDm13.nc', 'CN13IDm14.nc',
]
# PCTD
cl.pctdDir = 'CANON_october2013/Platforms/Ships/Western_Flyer/pctd/'
cl.wfpctd_base = cl.dodsBase + cl.pctdDir
cl.wfpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl' , 'oxygen']
cl.wfpctd_files = [
'CN13IDc01.nc', 'CN13IDc02.nc', 'CN13IDc03.nc', 'CN13IDc04.nc', 'CN13IDc05.nc', 'CN13IDc06.nc', 'CN13IDc07.nc', 'CN13IDc08.nc', 'CN13IDc09.nc', 'CN13IDc10.nc',
'CN13IDc11.nc', 'CN13IDc12.nc', 'CN13IDc13.nc', 'CN13IDc14.nc', 'CN13IDc15.nc', 'CN13IDc16.nc', 'CN13IDc17.nc', 'CN13IDc18.nc', 'CN13IDc19.nc', 'CN13IDc20.nc',
'CN13IDc21.nc', 'CN13IDc22.nc', 'CN13IDc23.nc', 'CN13IDc24.nc', 'CN13IDc25.nc', 'CN13IDc26.nc', 'CN13IDc27.nc', 'CN13IDc28.nc', 'CN13IDc29.nc', 'CN13IDc30.nc',
'CN13IDc31.nc', 'CN13IDc32.nc', 'CN13IDc33.nc', 'CN13IDc34.nc', 'CN13IDc35.nc', 'CN13IDc36.nc', 'CN13IDc37.nc', 'CN13IDc38.nc', 'CN13IDc39.nc', 'CN13IDc40.nc',
'CN13IDc41.nc', 'CN13IDc42.nc', 'CN13IDc43.nc', 'CN13IDc44.nc', 'CN13IDc45.nc', 'CN13IDc46.nc', 'CN13IDc47.nc', 'CN13IDc48.nc', 'CN13IDc49.nc', 'CN13IDc50.nc',
##'CN13IDc51.nc', 'CN13IDc52.nc', 'CN13IDc53.nc', 'CN13IDc54.nc',
]
# BCTD
# SubSample data files from /mbari/BOG_Archive/ReportsForSTOQS/GOC12/ copied to local BOG_Data dir
cl.bctdDir = 'CANON_october2013/Platforms/Ships/Western_Flyer/bctd/'
cl.subsample_csv_base = cl.dodsBase + cl.bctdDir
cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'BOG_Data')
cl.subsample_csv_files = [
#'STOQS_canon13_CHL_1U.csv', 'STOQS_canon13_CHL_5U.csv', 'STOQS_canon13_NH4.csv', 'STOQS_canon13_NO2.csv',
#'STOQS_canon13_NO3.csv','STOQS_canon13_OXY_ML.csv', 'STOQS_canon13_PHAEO_1U.csv', 'STOQS_canon13_PHAEO_5U.csv',
#'STOQS_canon13_PHAEO_GFF.csv', 'STOQS_canon13_PO4.csv', 'STOQS_canon13_SIO4.csv', #'STOQS_canon13_CARBON_GFF.csv
#'STOQS_canon13_CHL_GFF.csv',
]
######################################################################
# MOORINGS
######################################################################
# Mooring M1 Combined file produced by DPforSSDS processing - for just the duration of the campaign
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
'201309/OS_M1_20130918hourly_CMSTV.nc'
]
cl.m1_parms = [ 'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate
# Mooring OA1 CTD
cl.oaDir = 'CANON_september2013/Platforms/Moorings/OA_1/'
cl.OA1ctd_base = cl.dodsBase + cl.oaDir
cl.OA1ctd_files = ['OA1_ctd_2013.nc']
cl.OA1ctd_parms = ['TEMP', 'PSAL', 'conductivity' ]
cl.OA1ctd_startDatetime = startdate
cl.OA1ctd_endDatetime = enddate
# Mooring OA1 MET
cl.OA1met_base = cl.dodsBase + cl.oaDir
cl.OA1met_files = ['OA1_met_2013.nc']
cl.OA1met_parms = ['Wind_direction','Wind_speed','Air_temperature','Barometric_pressure']
cl.OA1met_startDatetime = startdate
cl.OA1met_endDatetime = enddate
# Mooring OA1 PH
cl.OA1pH_base = cl.dodsBase + cl.oaDir
cl.OA1pH_files = ['OA1_pH_2013.nc']
cl.OA1pH_parms = ['pH' ]
cl.OA1pH_startDatetime = startdate
cl.OA1pH_endDatetime = enddate
# Mooring OA1 PCO2
cl.OA1pco2_base = cl.dodsBase + cl.oaDir
cl.OA1pco2_files = ['OA1_pco2_2013.nc']
cl.OA1pco2_parms = ['pCO2' ]
cl.OA1pco2_startDatetime = startdate
cl.OA1pco2_endDatetime = enddate
# Mooring OA1 O2
cl.OA1o2_base = cl.dodsBase + cl.oaDir
cl.OA1o2_files = ['OA1_o2_2013.nc']
cl.OA1o2_parms = ['oxygen', 'oxygen_saturation' ]
cl.OA1o2_startDatetime = startdate
cl.OA1o2_endDatetime = enddate
# Mooring OA1 Fluorescence
cl.OA1fl_base = cl.dodsBase + cl.oaDir
cl.OA1fl_files = ['OA1_fl_2013.nc']
cl.OA1fl_parms = [ 'fluor' ]
cl.OA1fl_startDatetime = startdate
cl.OA1fl_endDatetime = enddate
# Mooring OA2 CTD
cl.oaDir = 'CANON_september2013/Platforms/Moorings/OA_2/'
cl.OA2ctd_base = cl.dodsBase + cl.oaDir
cl.OA2ctd_files = ['OA2_ctd_2013.nc']
cl.OA2ctd_parms = ['TEMP', 'PSAL', 'conductivity' ]
cl.OA2ctd_startDatetime = startdate
cl.OA2ctd_endDatetime = enddate
# Mooring OA
|
townboy/coil
|
solver.py
|
Python
|
mit
| 9,646
| 0.005183
|
#!/usr/bin/env python
# -*- utf-8 -*-
import re
import copy
import time
# This is const data
direct = ['u', 'r', 'd', 'l']
direct_vector = [{'x' : -1, 'y' : 0},
{'x' : 0, 'y' : 1},
{'x' : 1, 'y' : 0},
{'x' : 0, 'y' : -1}]
def calculateNum(i, f, y):
return i * y + f
def is_empty_not_change(map_data, i, f):
if i >= map_data['x'] or i < 0:
return False
if f >= map_data['y'] or f < 0:
return False
return map_data['map'][i][f]
def is_empty(map_data, i, f):
if i >= map_data['x'] or i < 0:
return False
if f >= map_data['y'] or f < 0:
return False
if not map_data['map'][i][f]:
return False
map_data['map'][i][f] = False
return True
def parse_map(raw_str):
map_data = {}
print 'start parse map info\n'
print 'here is map info ' + raw_str
# x is height , y is width
x = int(re.search('x=[0-9]+&', raw_str).group()[2:-1])
y = int(re.search('y=[0-9]+&', raw_str).group()[2:-1])
str_map = raw_str[-x*y : ]
map_data['x'] = x;
map_data['y'] = y;
map_data['map'] = []
for i in range(x):
this_row = []
for f in range(y):
if '1' == str_map[calculateNum(i, f, y)]:
this_row.append(False)
else:
this_row.append(True)
map_data['map'].append(copy.deepcopy(this_row))
'''
for i in range(x):
for f in range(y):
if not map_data['map'][i][f]:
print '#',
else:
print '*',
print ''
print 'this level x =', x, 'y =', y
'''
return map_data
def count_du_1(map_data):
response = []
for i in range(map_data['x']):
for f in range(map_data['y']):
if not map_data['map'][i][f]:
continue
du = 0
for dir_enum in range(4):
new_x = i + direct_vector[dir_enum]['x']
new_y = f + direct_vector[dir_enum]['y']
if is_empty_not_change(map_data, new_x, new_y):
du += 1
if 1 == du:
response.append([i, f])
return response
def get_must_info(map_data, x, y): # need update
fill = 0
flag_id = 0
map_data_another = copy.deepcopy(map_data)
def div_dfs(x, y):
count = 0
map_data_another['map'][x][y] = False
for i in range(4):
new_x = x + direct_vector[i]['x']
new_y = y + direct_vector[i]['y']
if is_empty(map_data_another, new_x ,new_y):
count += div_dfs(new_x, new_y)
return count + 1
def dfs(x, y, flag):
map_data['map'][x][y] = flag
for i in range(4):
new_x = x + direct_vector[i]['x']
new_y = y + direct_vector[i]['y']
if judge_empty(new_x, new_y):
if isinstance(map_data['map'][new_x][new_y], bool):
dfs(new_x, new_y, flag)
def judge_empty(x, y):
if x < 0 or x >= map_data['x']:
return False
if y < 0 or y >= map_data['y']:
return False
if isinstance(map_data['map'][x][y], bool):
return map_data['map'][x][y]
return True
for i in range(map_data['x']):
for f in range(map_data['y']):
if map_data['map'][i][f]:
fill += 1
du = 0
first_x, first_y = i, f
for dir_enum in range(4):
new_x = i + direct_vector[dir_enum]['x']
new_y = f + direct_vector[dir_enum]['y']
if True == judge_empty(new_x, new_y):
du += 1
if du < 3:
map_data['map'][i][f] = flag_id
flag_id += 1
if 0 == fill:
return 0, True
if div_dfs(first_x, first_y) != fill:
return fill, False
for i in range(map_data['x']):
for f in range(map_data['y']):
if (True == judge_empty(i, f)) and isinstance(map_data['map'][i][f], bool):
dfs(i, f, flag_id)
flag_id += 1
#tongji
edge_table = []
for i in range(flag_id):
edge_table.append([])
du_count = [0] * flag_id
for i in range(map_data['x']):
for f in range(map_data['y']):
if not isinstance(map_data['map'][i][f], bool):
for dir_enum in [0, 3]:
new_x = i + direct_vector[dir_enum]['x']
new_y = f + direct_vector[dir_enum]['y']
if judge_empty(new_x, new_y):
point_foo = map_data['map'][i][f]
point_bar = map_data['map'][new_x][new_y]
if point_foo != point_bar:
du_count[point_foo] += 1
du_count[point_bar] += 1
edge_table[point_foo].append(point_bar)
edge_table[point_bar].append(point_foo)
du_odd_count = 0
du_zero_exist = False
for point_enum in range(flag_id):
if 0 == du_count[point_enum]:
du_zero_exist = True
if 1 == (du_count[point_enum] % 2):
du_odd_count += 1
# exist zero du
if len(du_count) > 1 and du_zero_exist:
return fill ,False
# all even point
if 0 == du_odd_count:
return fill, True
if 2 != du_odd_count:
return fill, False
# start point enum, odd point equal 2
for dir_enum in range(4):
new_x = x + direct_vector[dir_enum]['x']
new_y = y + direct_vector[dir_enum]['y']
if judge_empty(new_x, new_y):
start_id = map_data['map'][new_x][new_y]
if 1 == (du_count[start_id] % 2):
return fill, True
for another_point in edge_table[start_id]:
if 1 == (du_count[another_point] % 2):
return fill, True
|
return fill, False
# just fit small 100 hash
def hash_function(map_data, x, y):
response = 0
for i in range(map_data['x']):
for f in range(map_data['y']):
response *= 2
if map_data['map'][i][f]:
response += 1
response = response * 100 + x
response = response * 100 + y
# return hash(response) conflict
return response
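# Worked illustration of the encoding above (hypothetical 2x2 board, added for
# clarity): a map [[True, False], [True, True]] packs row by row into the bits
# 1011 = 11, and with the current position x=1, y=0 the key becomes
# (11 * 100 + 1) * 100 + 0 = 110100. Packing the exact state into an integer
# this way avoids the collisions hinted at in the "conflict" comment above.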
def solve(map_data):
hash_table = {}
node = [0]
def dfs(x, y, last_dir, path):
        node[0] += 1
fill, is_continue = get_must_info(copy.deepcopy(map_use_dfs), x, y)
if 0 == fill:
return path
if not is_continue:
return 'no solution'
hash_code = hash_function(map_use_dfs, x, y)
if hash_code in hash_table:
return 'no solution'
hash_table[hash_code] = True
if -1 == last_dir:
dir_step = [0, 1, 2, 3]
else:
dir_step = [(last_dir + 5) % 4, (last_dir + 3) % 4]
for dir_enum in dir_step:
new_x, new_y = x + direct_vector[dir_enum]['x'], y + direct_vector[dir_enum]['y']
step_forward = 0
while is_empty(map_use_dfs, new_x, new_y):
step_forward += 1
new_x += direct_vector[dir_enum]['x']
new_y += direct_vector[dir_enum]['y']
new_x -= direct_vector[dir_enum]['x']
new_y -= direct_vector[dir_enum]['y']
# this direct is ok
if step_forward >= 1:
solution = dfs(new_x, new_y, dir_enum, path + direct[dir_enum])
if 'no solution' != solution:
return solution
step_x, step_y = x, y
for step_enum in range(step_forward):
step_x += direct_vector[dir_enum]['x']
step_y += direct_vector[dir_enum]['y']
map_use_dfs['map'][step_x][step_y] = True
return 'no solution'
response = {}
# handle the du = 1
du_1_vector = count_du_1(map_data)
for du_1_enum in du_1_vector:
x, y = du_1_enum[0], du_1_enum[1]
print 'first from point', x, y, 'start dfs'
|
ArcherSys/ArcherSys
|
Lib/test/test_plistlib.py
|
Python
|
mit
| 63,176
| 0.003134
|
<<<<<<< HEAD
<<<<<<< HEAD
# Copyright (C) 2003-2013 Python Software Foundation
import unittest
import plistlib
import os
import datetime
import codecs
import binascii
import collections
import struct
from test import support
from io import BytesIO
ALL_FORMATS=(plistlib.FMT_XML, plistlib.FMT_BINARY)
# The testdata is generated using Mac/Tools/plistlib_generate_testdata.py
# (which using PyObjC to control the Cocoa classes for generating plists)
TESTDATA={
plistlib.FMT_XML: binascii.a2b_base64(b'''
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NU
WVBFIHBsaXN0IFBVQkxJQyAiLS8vQXBwbGUvL0RURCBQTElTVCAxLjAvL0VO
IiAiaHR0cDovL3d3dy5hcHBsZS5jb20vRFREcy9Qcm9wZXJ0eUxpc3QtMS4w
LmR0ZCI+CjxwbGlzdCB2ZXJzaW9uPSIxLjAiPgo8ZGljdD4KCTxrZXk+YUJp
Z0ludDwva2V5PgoJPGludGVnZXI+OTIyMzM3MjAzNjg1NDc3NTc2NDwvaW50
ZWdlcj4KCTxrZXk+YUJpZ0ludDI8L2tleT4KCTxpbnRlZ2VyPjkyMjMzNzIw
MzY4NTQ3NzU4NTI8L2ludGVnZXI+Cgk8a2V5PmFEYXRlPC9rZXk+Cgk8ZGF0
ZT4yMDA0LTEwLTI2VDEwOjMzOjMzWjwvZGF0ZT4KCTxrZXk+YURpY3Q8L2tl
eT4KCTxkaWN0PgoJCTxrZXk+YUZhbHNlVmFsdWU8L2tleT4KCQk8ZmFsc2Uv
PgoJCTxrZXk+YVRydWVWYWx1ZTwva2V5PgoJCTx0cnVlLz4KCQk8a2V5PmFV
bmljb2RlVmFsdWU8L2tleT4KCQk8c3RyaW5nPk3DpHNzaWcsIE1hw588L3N0
cmluZz4KCQk8a2V5PmFub3RoZXJTdHJpbmc8L2tleT4KCQk8c3RyaW5nPiZs
dDtoZWxsbyAmYW1wOyAnaGknIHRoZXJlISZndDs8L3N0cmluZz4KCQk8a2V5
PmRlZXBlckRpY3Q8L2tleT4KCQk8ZGljdD4KCQkJPGtleT5hPC9rZXk+CgkJ
CTxpbnRlZ2VyPjE3PC9pbnRlZ2VyPgoJCQk8a2V5PmI8L2tleT4KCQkJPHJl
YWw+MzIuNTwvcmVhbD4KCQkJPGtleT5jPC9rZXk+CgkJCTxhcnJheT4KCQkJ
CTxpbnRlZ2VyPjE8L2ludGVnZXI+CgkJCQk8aW50ZWdlcj4yPC9pbnRlZ2Vy
PgoJCQkJPHN0cmluZz50ZXh0PC9zdHJpbmc+CgkJCTwvYXJyYXk+CgkJPC9k
aWN0PgoJPC9kaWN0PgoJPGtleT5hRmxvYXQ8L2tleT4KCTxyZWFsPjAuNTwv
cmVhbD4KCTxrZXk+YUxpc3Q8L2tleT4KCTxhcnJheT4KCQk8c3RyaW5nPkE8
L3N0cmluZz4KCQk8c3RyaW5nPkI8L3N0cmluZz4KCQk8aW50ZWdlcj4xMjwv
aW50ZWdlcj4KCQk8cmVhbD4zMi41PC9yZWFsPgoJCTxhcnJheT4KCQkJPGlu
dGVnZXI+MTwvaW50ZWdlcj4KCQkJPGludGVnZXI+MjwvaW50ZWdlcj4KCQkJ
PGludGVnZXI+MzwvaW50ZWdlcj4KCQk8L2FycmF5PgoJPC9hcnJheT4KCTxr
ZXk+YU5lZ2F0aXZlQmlnSW50PC9rZXk+Cgk8aW50ZWdlcj4tODAwMDAwMDAw
MDA8L2ludGVnZXI+Cgk8a2V5PmFOZWdhdGl2ZUludDwva2V5PgoJPGludGVn
ZXI+LTU8L2ludGVnZXI+Cgk8a2V5PmFTdHJpbmc8L2tleT4KCTxzdHJpbmc+
RG9vZGFoPC9zdHJpbmc+Cgk8a2V5PmFuRW1wdHlEaWN0PC9rZXk+Cgk8ZGlj
dC8+Cgk8a2V5PmFuRW1wdHlMaXN0PC9rZXk+Cgk8YXJyYXkvPgoJPGtleT5h
bkludDwva2V5PgoJPGludGVnZXI+NzI4PC9pbnRlZ2VyPgoJPGtleT5uZXN0
ZWREYXRhPC9rZXk+Cgk8YXJyYXk+CgkJPGRhdGE+CgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5k
VzVyCgkJUGdBQkFnTThiRzkwY3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJ
RFBHeHZkSE1nYjJZZ1ltbHVZWEo1CgkJSUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004Ykc5MGN5QnZaaUJpCgkJYVc1
aGNua2daM1Z1YXo0QUFRSURQR3h2ZEhNZ2IyWWdZbWx1WVhKNUlHZDFibXMr
QUFFQ0F6eHNiM1J6CgkJSUc5bUlHSnBibUZ5ZVNCbmRXNXJQZ0FCQWdNOGJH
OTBjeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlECgkJUEd4dmRITWdiMlln
WW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09CgkJPC9kYXRhPgoJPC9hcnJheT4K
CTxrZXk+c29tZURhdGE8L2tleT4KCTxkYXRhPgoJUEdKcGJtRnllU0JuZFc1
clBnPT0KCTwvZGF0YT4KCTxrZXk+c29tZU1vcmVEYXRhPC9rZXk+Cgk8ZGF0
YT4KCVBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytBQUVDQXp4c2IzUnpJ
RzltSUdKcGJtRnllU0JuZFc1clBnQUJBZ004CgliRzkwY3lCdlppQmlhVzVo
Y25rZ1ozVnVhejRBQVFJRFBHeHZkSE1nYjJZZ1ltbHVZWEo1SUdkMWJtcytB
QUVDQXp4cwoJYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVyUGdBQkFnTThiRzkw
Y3lCdlppQmlhVzVoY25rZ1ozVnVhejRBQVFJRFBHeHYKCWRITWdiMllnWW1s
dVlYSjVJR2QxYm1zK0FBRUNBenhzYjNSeklHOW1JR0pwYm1GeWVTQm5kVzVy
UGdBQkFnTThiRzkwCgljeUJ2WmlCaWFXNWhjbmtnWjNWdWF6NEFBUUlEUEd4
|
dmRITWdiMllnWW1sdVlYSjVJR2QxYm1zK0FBRUNBdz09Cgk8L2RhdGE+Cgk8
a2V5PsOFYmVucmFhPC9rZXk+Cgk8c3RyaW5nPlRoYXQgd2FzIGEgdW5pY29k
ZSBrZXkuPC9zdHJpbmc+CjwvZGljdD4KPC9wbGlzdD4K'''),
    plistlib.FMT_BINARY: binascii.a2b_base64(b'''
YnBsaXN0MDDfEBABAgMEBQYHCAkKCwwNDg8QERITFCgpLzAxMjM0NTc2OFdh
QmlnSW50WGFCaWdJbnQyVWFEYXRlVWFEaWN0VmFGbG9hdFVhTGlzdF8QD2FO
ZWdhdGl2ZUJpZ0ludFxhTmVnYXRpdmVJbnRXYVN0cmluZ1thbkVtcHR5RGlj
dFthbkVtcHR5TGlzdFVhbkludFpuZXN0ZWREYXRhWHNvbWVEYXRhXHNvbWVN
b3JlRGF0YWcAxQBiAGUAbgByAGEAYRN/////////1BQAAAAAAAAAAIAAAAAA
AAAsM0GcuX30AAAA1RUWFxgZGhscHR5bYUZhbHNlVmFsdWVaYVRydWVWYWx1
ZV1hVW5pY29kZVZhbHVlXWFub3RoZXJTdHJpbmdaZGVlcGVyRGljdAgJawBN
AOQAcwBzAGkAZwAsACAATQBhAN9fEBU8aGVsbG8gJiAnaGknIHRoZXJlIT7T
HyAhIiMkUWFRYlFjEBEjQEBAAAAAAACjJSYnEAEQAlR0ZXh0Iz/gAAAAAAAA
pSorLCMtUUFRQhAMoyUmLhADE////+1foOAAE//////////7VkRvb2RhaNCg
EQLYoTZPEPo8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmlu
YXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBv
ZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4A
AQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBn
dW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDTTxiaW5hcnkgZ3Vu
az5fEBdUaGF0IHdhcyBhIHVuaWNvZGUga2V5LgAIACsAMwA8AEIASABPAFUA
ZwB0AHwAiACUAJoApQCuALsAygDTAOQA7QD4AQQBDwEdASsBNgE3ATgBTwFn
AW4BcAFyAXQBdgF/AYMBhQGHAYwBlQGbAZ0BnwGhAaUBpwGwAbkBwAHBAcIB
xQHHAsQC0gAAAAAAAAIBAAAAAAAAADkAAAAAAAAAAAAAAAAAAALs'''),
}
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def _create(self, fmt=None):
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.5, [1, 2, 3]],
aFloat = 0.5,
anInt = 728,
aBigInt = 2 ** 63 - 44,
aBigInt2 = 2 ** 63 + 44,
aNegativeInt = -5,
aNegativeBigInt = -80000000000,
aDict=dict(
anotherString="<hello & 'hi' there!>",
aUnicodeValue='M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
),
someData = b"<binary gunk>",
someMoreData = b"<lots of binary gunk>\0\1\2\3" * 10,
nestedData = [b"<lots of binary gunk>\0\1\2\3" * 10],
aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
anEmptyDict = dict(),
anEmptyList = list()
)
pl['\xc5benraa'] = "That was a unicode key."
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
with open(support.TESTFN, 'wb') as fp:
plistlib.dump(pl, fp)
with open(support.TESTFN, 'rb') as fp:
pl2 = plistlib.load(fp)
self.assertEqual(dict(pl), dict(pl2))
self.assertRaises(AttributeError, plistlib.dump, pl, 'filename')
self.assertRaises(AttributeError, plistlib.load, 'filename')
def test_invalid_type(self):
pl = [ object() ]
for fmt in ALL_FORMATS:
with self.subTest(fmt=fmt):
self.assertRaises(TypeError, plistlib.dumps, pl, fmt=fmt)
def test_int(self):
for pl in [0, 2**8-1, 2**8, 2**16-1, 2**16, 2**32-1, 2**32,
2**63-1, 2**64-1, 1, -2**63]:
for fmt in ALL_FORMATS:
with self.subTest(pl=pl, fmt=fmt):
data = plistlib.dumps(pl, fmt=fmt)
pl2 = plistlib.loads(data)
self.assertIsInstance(pl2, int)
self.assertEqual(pl, pl2)
data2 = plistlib.dumps(pl2, fmt=fmt)
self.assertEqual(data, data2)
for fmt in ALL_FORMATS:
for pl in (2 ** 64 + 1, 2 ** 127-1, -2**64, -2 ** 127):
with self.subTest(pl=pl, fmt=fmt):
self.assertRaises(OverflowError, plistlib.dumps,
pl, fmt=fmt)
|
yufeldman/arrow
|
python/pyarrow/tests/test_tensor.py
|
Python
|
apache-2.0
| 4,521
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import pytest
import numpy as np
import pyarrow as pa
def test_tensor_attrs():
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
assert tensor.ndim == 2
assert tensor.size == 40
assert tensor.shape == data.shape
assert tensor.strides == data.strides
assert tensor.is_contiguous
assert tensor.is_mutable
# not writeable
data2 = data.copy()
data2.flags.writeable = False
tensor = pa.Tensor.from_numpy(data2)
assert not tensor.is_mutable
def test_tensor_base_object():
tensor = pa.Tensor.from_numpy(np.random.randn(10, 4))
n = sys.getrefcount(tensor)
array = tensor.to_numpy() # noqa
assert sys.getrefcount(tensor) == n + 1
@pytest.mark.parametrize('dtype_str,arrow_type', [
('i1', pa.int8()),
('i2', pa.int16()),
('i4', pa.int32()),
('i8', pa.int64()),
('u1', pa.uint8()),
('u2', pa.uint16()),
('u4', pa.uint32()),
('u8', pa.uint64()),
('f2', pa.float16()),
('f4', pa.float32()),
('f8', pa.float64())
])
def test_tensor_numpy_roundtrip(dtype_str, arrow_type):
dtype = np.dtype(dtype_str)
data = (100 * np.random.randn(10, 4)).astype(dtype)
tensor = pa.Tensor.from_numpy(data)
assert tensor.type == arrow_type
repr(tensor)
result = tensor.to_numpy()
assert (data == result).all()
def _try_delete(path):
import gc
gc.collect()
try:
os.remove(path)
except os.error:
pass
def test_tensor_ipc_roundtrip(tmpdir):
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-roundtrip')
mmap = pa.create_memory_map(path, 1024)
pa.write_tensor(tensor, mmap)
mmap.seek(0)
result = pa.read_tensor(mmap)
assert result.equals(tensor)
def test_tensor_ipc_strided(tmpdir):
data1 = np.random.randn(10, 4)
tensor1 = pa.Tensor.from_numpy(data1[::2])
data2 = np.random.randn(10, 6, 4)
tensor2 = pa.Tensor.from_numpy(data2[::, ::2, ::])
path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-strided')
mmap = pa.create_memory_map(path, 2048)
for tensor in [tensor1, tensor2]:
mmap.seek(0)
pa.write_tensor(tensor, mmap)
mmap.seek(0)
result = pa.read_tensor(mmap)
assert result.equals(tensor)
def test_tensor_equals():
def eq(a, b):
assert a.equals(b)
assert a == b
assert not (a != b)
def ne(a, b):
assert not a.equals(b)
assert not (a == b)
assert a != b
data = np.random.randn(10, 6, 4)[::, ::2, ::]
    tensor1 = pa.Tensor.from_numpy(data)
    tensor2 = pa.Tensor.from_numpy(np.ascontiguousarray(data))
eq(tensor1, tensor2)
data = data.copy()
data[9, 0, 0] = 1.0
tensor2 = pa.Tensor.from_numpy(np.ascontiguousarray(data))
ne(tensor1, tensor2)
def test_tensor_hashing():
# Tensors are unhashable
with pytest.raises(TypeError, match="unhashable"):
hash(pa.Tensor.from_numpy(np.arange(10)))
def test_tensor_size():
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
assert pa.get_tensor_size(tensor) > (data.size * 8)
def test_read_tensor(tmpdir):
    # Create and write tensor
data = np.random.randn(10, 4)
tensor = pa.Tensor.from_numpy(data)
data_size = pa.get_tensor_size(tensor)
path = os.path.join(str(tmpdir), 'pyarrow-tensor-ipc-read-tensor')
write_mmap = pa.create_memory_map(path, data_size)
pa.write_tensor(tensor, write_mmap)
# Try to read tensor
read_mmap = pa.memory_map(path, mode='r')
array = pa.read_tensor(read_mmap).to_numpy()
np.testing.assert_equal(data, array)
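A short standalone sketch of the zero-copy behaviour the refcount test above relies on; this assumes Tensor.from_numpy and to_numpy share memory with the source array, which is how pyarrow documents them:

import numpy as np
import pyarrow as pa

data = np.arange(8, dtype=np.float64).reshape(2, 4)
tensor = pa.Tensor.from_numpy(data)   # zero-copy: the Tensor references data's buffer
view = tensor.to_numpy()              # zero-copy view over the same memory
data[0, 0] = 42.0
assert view[0, 0] == 42.0             # the mutation is visible through the Tensor view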
|
AloneRoad/waskr
|
waskr/tests/test_database.py
|
Python
|
mit
| 5,538
| 0.016071
|
import unittest
from time import strftime, time, gmtime
from pymongo import Connection
from waskr.database import Stats
config = {
'db_engine': 'mongodb',
'db_host': 'localhost',
'db_port': 27017,
}
class TestDatabase(unittest.TestCase):
def __init__(self, *args, **params):
unittest.TestCase.__init__(self, *args, **params)
connection = Connection(
config['db_host'],
config['db_port'])
self.waskr = connection['test_waskr']
        self.stats = self.waskr['stats']
self.users = self.waskr['user']
self.db = Stats(config, test=True)
self.single_stat = dict(
time = 9999,
            response = 9999,
url = '/',
application = 'foo',
server_id = '1'
)
def setUp(self):
"""Creates a new empty database for testing"""
connection = Connection(
config['db_host'],
config['db_port'])
waskr = connection['test_waskr']
waskr.drop_collection('user')
waskr.drop_collection('stats')
stats = waskr['stats']
users = waskr['user']
def tearDown(self):
"""Removes the database previously created"""
connection = Connection(
config['db_host'],
config['db_port'])
# make sure there is not a previous instance:
waskr = connection['test_waskr']
waskr.drop_collection('user')
waskr.drop_collection('stats')
def test_connection(self):
try:
Stats(config, test=True)
db_conn = True
except:
db_conn = False
self.assertTrue(db_conn)
def test_insert_validate_data(self):
data = {}
data['time'] = 9999
data['response'] = 9999
data['url'] = '/'
data['server_id'] = '1'
data['application'] = 'foo'
self.db.insert([data])
item = [i for i in self.stats.find()]
actual = item[0]
self.assertEqual(actual['time'], data['time'])
self.assertEqual(actual['response'], data['response'])
self.assertEqual(actual['url'], data['url'])
self.assertEqual(actual['application'], data['application'])
self.assertEqual(actual['server_id'], data['server_id'])
def test_insert_count(self):
data = {}
data['time'] = 9999
data['response'] = 9999
data['url'] = '/'
data['server_id'] = '1'
data['application'] = 'foo'
self.db.insert([data])
self.assertEqual(self.stats.count(), 1)
def test_last_insert(self):
current_time = time()
struct = gmtime(current_time)
formatted = strftime('%Y-%m-%d %H:%M:%S', struct)
stats = dict(
time = current_time,
response = 9999,
url = '/',
application = 'foo',
server_id = '1'
)
self.db.insert([stats])
actual = self.db.last_insert()
self.assertEqual(actual, formatted)
def test_app_nodes(self):
self.db.insert([self.single_stat])
actual = self.db.apps_nodes()
expected = [(u'foo', u'1')]
self.assertEqual(actual, expected)
def test_response_time_out_of_range(self):
"""An out of range time should return an empty list """
self.db.insert([self.single_stat])
actual = self.db.response_time(1)
expected = []
self.assertEqual(actual, expected)
def test_response_time_in_range(self):
current_time = int(time())
stats = dict(
time = current_time,
response = 9999,
url = '/',
application = 'foo',
server_id = '1'
)
self.db.insert([stats])
actual = self.db.response_time(120)
expected = [[current_time*1000, 9999]]
self.assertEqual(actual, expected)
def test_response_time_in_miliseconds(self):
current_time = int(time())
stats = dict(
time = current_time,
response = 9999,
url = '/',
application = 'foo',
server_id = '1'
)
self.db.insert([stats])
response = self.db.response_time(120)
actual = response[0][0]
expected = current_time*1000
self.assertEqual(actual, expected)
def test_request_time(self):
current_time = int(time())
stats = dict(
time = current_time,
response = 9999,
url = '/',
application = 'foo',
server_id = '1'
)
self.db.insert([stats])
actual = self.db.request_time(120)
expected = [[current_time*1000, 1]]
self.assertEqual(actual, expected)
def test_request_time_out_of_range(self):
current_time = int(time()) - 20000
stats = dict(
time = current_time,
response = 9999,
url = '/',
application = 'foo',
server_id = '1'
)
self.db.insert([stats])
actual = self.db.response_time(120)
expected = []
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
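For context, a hypothetical standalone driver for the Stats interface exercised above; it mirrors the test config, assumes a MongoDB instance on localhost, and the expected output shapes are inferred from the assertions in these tests:

from time import time
from waskr.database import Stats

db = Stats({'db_engine': 'mongodb', 'db_host': 'localhost', 'db_port': 27017}, test=True)
now = int(time())
# insert() takes a list of stat dicts; response_time(minutes) returns [timestamp_ms, response] pairs
db.insert([dict(time=now, response=125, url='/', application='foo', server_id='1')])
print(db.response_time(120))   # expected: [[now * 1000, 125]]
print(db.apps_nodes())         # expected: [(u'foo', u'1')]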
|
mapado/CatchMemeAll
|
twitter_server.py
|
Python
|
apache-2.0
| 763
| 0.002621
|
import tweepy
import os
from flask import Flask, make_response, jsonify
CONSUMER_KEY = os.environ['CATCHMEMEALL_TWITTER_CONSUMER_TOKEN']
CONSUMER_SECRET = os.environ['CATCHMEMEALL_TWITTER_CONSUMER_SECRET']
app = Flask(__name__)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
api = tweepy.API(auth)
@app.route('/twitter/avatar/<username>', methods=['GET'])
def get_user_avatar(username):
try:
user = api.get_user(username)
except tweepy.TweepError:
return make_response(jsonify({'error': 'no username %s' % (username)}), 404)
else:
json_data = {'avatar': user.profile_image_url}
return make_response(jsonify(json_data), 200)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8888, debug=True)
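A quick client-side sketch (not part of the server file) for exercising the avatar endpoint once the app is running; the port matches app.run above and the username is only an example:

import requests

resp = requests.get('http://localhost:8888/twitter/avatar/someuser')
if resp.status_code == 200:
    print(resp.json()['avatar'])
else:
    print(resp.json().get('error'))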
|