| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
| voitureblanche/projet-secret | work/Python-toolchain/3D/build_cosine_tables.py | Python | mit | 1,178 | 0.043294 |
import os
import string
import codecs
import ast
import math
from vector3 import Vector3
filename_out = "../../
|
Assets/cosine_table"
table_size = 512
fixed_point_precision = 512
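# Each of the 512 table entries is cos (or sin) of 2*pi*i/512, scaled by the 512 fixed-point factor and truncated to int.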
def dumpCosine(_cosine_func, display_name, f):
f.write('const int ' + display_name + '[] =' + '\n')
f.write('{' + '\n')
# _str_out = '\t'
for angle in range(0,table_size):
_cos = int(_cosine_func(angle * math.pi / (table_size / 2.0)) * fixed_point_precision)
_str_out = str(_cos) + ','
f.write(_str_out + '\n')
# if angle%10 == 9:
# f.write(_str_out + '\n')
# _str_out = '\t'
f.write('};' + '\n')
def main():
## Creates the header
f = codecs.open(filename_out + '.h', 'w')
f.write('#define COSINE_TABLE_LEN ' + str(table_size) + '\n')
f.write('\n')
f.write('extern const int tcos[COSINE_TABLE_LEN];' + '\n')
f.write('extern const int tsin[COSINE_TABLE_LEN];' + '\n')
f.close()
## Creates the C file
f = codecs.open(filename_out + '.c', 'w')
dumpCosine(_cosine_func = math.cos, display_name = 'tcos', f = f)
f.write('\n')
dumpCosine(_cosine_func = math.sin, display_name = 'tsin', f = f)
f.close()
main()
| genodeftest/exaile | xlgui/preferences/plugin.py | Python | gpl-2.0 | 11,178 | 0.000716 |
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import GLib
from gi.repository import Gtk
import xl.unicode
from xl import event, main, plugins, xdg
from xlgui.widgets import common, dialogs
from xl.nls import gettext as _, ngettext
import logging
logger = logging.getLogger(__name__)
name = _('Plugins')
ui = xdg.get_data_path('ui', 'preferences', 'plugin.ui')
class PluginManager(object):
"""
Gui to manage plugins
"""
def __init__(self, preferences, builder):
"""
Initializes the manager
"""
self.preferences = preferences
builder.connect_signals(self)
self.plugins = main.exaile().plugins
self.message = dialogs.MessageBar(
parent=builder.get_object('preferences_pane'), buttons=Gtk.ButtonsType.CLOSE
)
self.message.connect('response', self.on_messagebar_response)
self.list = builder.get_object('plugin_tree')
self.enabled_cellrenderer = builder.get_object('enabled_cellrenderer')
if main.exaile().options.Debug:
reload_cellrenderer = common.ClickableCellRendererPixbuf()
reload_cellrenderer.props.icon_name = 'view-refresh'
reload_cellrenderer.props.xalign = 1
reload_cellrenderer.connect('clicked', self.on_reload_cellrenderer_clicked)
name_column = builder.get_object('name_column')
name_column.pack_start(reload_cellrenderer, True)
name_column.add_attribute(reload_cellrenderer, 'visible', 3)
self.version_label = builder.get_object('version_label')
self.author_label = builder.get_object('author_label')
self.name_label = builder.get_object('name_label')
self.description = builder.get_object('description_view')
self.model = builder.get_object('model')
self.filter_model = self.model.filter_new()
self.show_incompatible_cb = builder.get_object('show_incompatible_cb')
self.filter_model.set_visible_func(self._model_visible_func)
selection = self.list.get_selection()
selection.connect('changed', self.on_selection_changed)
self._load_plugin_list()
self._evt_rm1 = event.add_ui_callback(
self.on_plugin_event, 'plugin_enabled', None, True
)
self._evt_rm2 = event.add_ui_callback(
self.on_plugin_event, 'plugin_disabled', None, False
)
self.list.connect('destroy', self.on_destroy)
GLib.idle_add(selection.select_path, (0,))
GLib.idle_add(self.list.grab_focus)
def _load_plugin_list(self):
"""
Loads the plugin list
"""
plugins = self.plugins.list_installed_plugins()
uncategorized = _('Uncategorized')
plugins_dict = {uncategorized: []}
failed_list = []
self.plugin_to_path = {}
for plugin_name in plugins:
try:
info = self.plugins.get_plugin_info(plugin_name)
compatible = self.plugins.is_compatible(info)
broken = self.plugins.is_potentially_broken(info)
except Exception:
failed_list += [plugin_name]
continue
# determine icon to show
if not compatible:
icon = 'dialog-error'
elif broken:
icon = 'dialog-warning'
else:
icon = None
enabled = plugin_name in self.plugins.enabled_plugins
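# Row tuple: (plugin id, display name, version, enabled, icon name, broken, compatible, is-plugin-row); category rows set the final flag to False.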
plugin_data = (
plugin_name,
info['Name'],
str(info['Version']),
enabled,
icon,
broken,
compatible,
True,
)
if 'Category' in info:
cat = plugins_dict.setdefault(info['Category'], [])
cat.append(plugin_data)
else:
plugins_dict[uncategorized].append(plugin_data)
self.list.set_model(None)
self.model.clear()
def categorykey(item):
if item[0] == uncategorized:
return '\xff' * 10
return xl.unicode.strxfrm(item[0])
plugins_dict = sorted(plugins_dict.iteritems(), key=categorykey)
for category, plugins_list in plugins_dict:
plugins_list.sort(key=lambda x: xl.unicode.strxfrm(x[1]))
it = self.model.append(
None, (None, category, '', False, '', False, True, False)
)
for plugin_data in plugins_list:
pit = self.model.append(it, plugin_data)
path = self.model.get_string_from_iter(pit)
self.plugin_to_path[plugin_data[0]] = path
self.list.set_model(self.filter_model)
# TODO: Keep track of which categories are already expanded, and only expand those
self.list.expand_all()
if failed_list:
self.message.show_error(
_('Could not load plugin info!'),
ngettext('Failed plugin: %s', 'Failed plugins: %s', len(failed_list))
% ', '.join(failed_list),
)
def on_destroy(self, widget):
self._evt_rm1()
self._evt_rm2()
def on_messagebar_response(self, widget, response):
"""
Hides the messagebar if requested
"""
if response == Gtk.ResponseType.CLOSE:
widget.hide()
def on_plugin_tree_row_activated(self, tree, path, column):
"""
Enables or disables the selected plugin
"""
self.enabled_cellrenderer.emit('toggled', path[0])
def on_reload_cellrenderer_clicked(self, cellrenderer, path):
"""
Reloads a plugin from scratch
"""
plugin_name = self.filter_model[path][0]
enabled = self.filter_model[path][3]
if enabled:
try:
self.plugins.disable_plugin(plugin_name)
except Exception as e:
self.message.show_error(_('Could not disable plugin!'), str(e))
return
logger.info('Reloading plugin %s...', plugin_name)
self.plugins.load_plugin(plugin_name, reload_plugin=True)
if enabled:
try:
self.plugins.enable_plugin(plugin_name)
except Exception as e:
self.message.show_error(_('Could not enable plugin!'), str(e))
return
def on_install_plugin_button_clicked(self, button):
"""
Shows a dialog allowing the user to choose a plugin to install
from the filesystem
"""
dialog = Gtk.FileChooserDialog(
_('Choose a Plugin'),
self.preferences.parent,
buttons=(
Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_ADD,
Gtk.ResponseType.OK,
),
)
f
| peragro/peragro-at | src/damn_at/analyzers/image/analyzerimage.py | Python | bsd-3-clause | 3,097 | 0.000323 |
"""
Generic Image analyzer.
"""
# Standard
import os
import logging
import subprocess
# Damn
from damn_at import (
mimetypes,
MetaDataType,
MetaDataValue,
FileId,
FileDescription,
AssetDescription,
AssetId
)
from damn_at.pluginmanager import IAnalyzer
from damn_at.analyzer import AnalyzerException
LOG = logging.getLogger(__name__)
class GenericImageAnalyzer(IAnalyzer):
"""Generic Image analyzer."""
handled_types = ["image/x-ms-bmp", "image/jpeg", "image/png", "image/gif",
"image/x-photoshop", "image/tiff", "application/x-xcf"]
def __init__(self):
super(GenericImageAnalyzer, self).__init__()
def activate(self):
pass
def analyze(self, an_uri):
fileid = FileId(filename=os.path.abspath(an_uri))
file_descr = FileDescription(file=fileid)
file_descr.assets = []
image_mimetype = mimetypes.guess_type(an_uri)[0]
asset_descr = AssetDescription(asset=AssetId(
subname='main layer',
mimetype=image_mimetype,
file=fileid
))
try:
pro = subprocess.Popen(
['exiftool', an_uri],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = pro.communicate()
if pro.returncode != 0:
msg = 'ImageAnalyzer failed %s with error code %d!:\n%s' % (
an_uri,
pro.returncode,
str(err)
)
LOG.error(msg)
raise AnalyzerException(msg)
except OSError as e:
msg = 'ImageAnalyzer failed %s:\n%s' % (an_uri, e)
LOG.error(msg)
raise OSError(msg)
meta = {}
flag = 0
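# exiftool output is parsed line by line; key/value collection starts once the 'MIME Type' line is seen.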
lines = str(out).strip().split('\n')
for line in lines:
line = line.split(':', 1)
if len(line) == 1:
line = line[0].split('=')
line = [l.strip() for l in line]
if line[0] == 'MIME Type':
flag = 1
if flag == 1 and line[0] not in ['MIME Type', 'Image Size']:
meta[line[0].lower().replace(' ', '_')] = line[1]
from damn_at.analyzers.image import metadata
extractor_map = {
'image/png': metadata.MetaDataPNG,
'image/jpeg': metadata.MetaDataJPG,
'image/x-ms-bmp': metadata.MetaDataBMP,
'image/x-photoshop': metadata.MetaDataPSD,
'application/x-xcf': metadata.MetaDataXCF,
}
if image_mimetype in extractor_map:
asset_descr.metadata = extractor_map[image_mimetype].extract(meta)
else:
asset_descr.metadata = {}
for key, value in meta.items():
if key not in asset_descr.metadata:
asset_descr.metadata['exif-' + key] = MetaDataValue(
type=MetaDataType.STRING,
string_value=value
)
file_descr.assets.append(asset_descr)
return file_descr
| nguyenkims/projecteuler-python | src/p102.py | Python | mit | 1,087 | 0.103036 |
limit = 10 ** 4
def isOK(a1,a2,a3,m):
'''test if m is on the same side of the line a1a2 as a3'''
x1, y1 = float(a1[0]), float(a1[1])
x2, y2 = float(a2[0]), float(a2[1])
x3, y3 = float(a3[0]), float(a3[1])
x, y = float(m[0]), float(m[1])
t = (x-x1) * (y2-y1) - (y-y1) * (x2-x1)
k = (x3-x1) * (y2-y1) - (y3-y1) * (x2-x1)
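# t and k are z-components of 2D cross products; t*k > 0 means m and a3 lie on the same side of the line a1a2.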
if t*k > 0:
return True
return False
def isInterior(a1,a2,a3,m):
'''test if m is in the triangle formed by a1,a2,a3'''
if isOK(a1,a2,a3,m) and isOK(a2,a3,a1,m) and isOK(a3,a1,a2,m):
return True
def test():
a1 =(-340,495)
a2= (-153,-910)
a3 = (835,-947)
X= (-175,41)
Y= (-421,-714)
Z = (574,-645)
m = (0,0)
print isInterior(a1,a2,a3,m), isInterior(X,Y,Z,m)
print intersection(X,m,Y,Z)
# test()
def main():
inp= file('triangles.txt')
count = 0
O = [0,0]
TRI= []
t = inp.readline()
while (t!=""):
l= t.strip().split(',')
x = [int(l[0]), int(l[1])]
y = [int(l[2]), int(l[3])]
z = [int(l[4]), int(l[5])]
if isInterior(x,y,z,O):
count +=1
# print x,y,z
# else: count+=1
t = inp.readline()
print 'count', count
main()
| jamesandreou/hackerrank-solutions | warmup/hr_time_conversion.py | Python | mit | 298 | 0.026846 |
# hackerrank - Algorithms: Time Conversion
# Written by James Andreou, University of Waterloo
S = raw_input()
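# TYPE is the 'A' or 'P' of the trailing AM/PM marker.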
TYPE = S[len(S)-2]
if S[:2] == "12":
if TYPE == "A":
print "00" + S[2:-2]
else:
print S[:-2]
elif TYPE == "P":
HOUR = int(S[:2]) + 12
print str(HOUR) + S[2:-2]
else:
print S[:-2]
| Castronova/EMIT | api_old/ODM2/Sensors/services/__init__.py | Python | gpl-2.0 | 319 | 0.00627 |
__author__ = 'Stephanie'
from ODMconnection import dbconnection
from readSensors import readSensors
from updateSensors import updateSensors
from createSensors import createSensors
from deleteSensors import deleteSensors
__all__ = [
'readSensors',
'updateSensors',
'createSensors',
'deleteSensors',
]
| garinh/cs | docs/support/docutils/writers/latex2e.py | Python | lgpl-2.1 | 75,964 | 0.00387 |
"""
:Author: Engelbert Gruber
:Contact: grubert@users.sourceforge.net
:Revision: $Revision: 21817 $
:Date: $Date: 2005-07-21 13:39:57 -0700 (Thu, 21 Jul 2005) $
:Copyright: This module has been placed in the public domain.
LaTeX2e document tree Writer.
"""
__docformat__ = 'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # e.g. ##.
import sys
import time
import re
import string
from types import ListType
from docutils import frontend, nodes, languages, writers, utils
class Writer(writers.Writer):
supported = ('latex','latex2e')
"""Formats this writer supports."""
settings_spec = (
'LaTeX-Specific Options',
'The LaTeX "--output-encoding" default is "latin-1:strict".',
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
('Specify document options. Multiple options can be given, '
'separated by commas. Default is "10pt,a4paper".',
['--documentoptions'],
{'default': '10pt,a4paper', }),
('Use LaTeX footnotes. LaTeX supports only numbered footnotes (does it?). '
'Default: no, uses figures.',
['--use-latex-footnotes'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "superscript".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'superscript',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Use LaTeX citations. '
'Default: no, uses figures which might get mixed with images.',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Specify a stylesheet file. The file will be "input" by latex in '
'the document header. Default is no stylesheet (""). '
'Overrides --stylesheet-path.',
['--stylesheet'],
{'default': '', 'metavar': '<file>',
'overrides': 'stylesheet_path'}),
('Specify a stylesheet file, relative to the current working '
'directory. Overrides --stylesheet.',
['--stylesheet-path'],
{'metavar': '<file>', 'overrides': 'stylesheet'}),
('Table of contents by docutils (default) or latex. Latex (writer) '
'supports only one ToC per document, but docutils does not write '
'pagenumbers.',
['--use-latex-toc'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Let LaTeX print author and date, do not show it in docutils '
'document info.',
['--use-latex-docinfo'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "0" to disable).',
['--hyperlink-color'], {'default': 'blue'}),
('Enable compound enumerators for nested enumerated lists '
'(e.g. "1.2.a.ii"). Default: disabled.',
['--compound-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compound enumerators for nested enumerated lists. This is '
'the default.',
['--no-compound-enumerators'],
{'action': 'store_false', 'dest': 'compound_enumerators'}),
('Enable section ("." subsection ...) prefixes for compound '
'enumerators. This has no effect without --compound-enumerators. '
'Default: disabled.',
['--section-prefix-for-enumerators'],
{'default': None, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable section prefixes for compound enumerators. '
'This is the default.',
['--no-section-prefix-for-enumerators'],
{'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
('Set the separator between section number and enumerator '
'for compound enumerated lists. Default is "-".',
['--section-enumerator-separator'],
{'default': '-', 'metavar': '<char>'}),
('When possible, use verbatim for literal-blocks. '
'Default is to always use the mbox environment.',
['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) only horizontal lines '
'above and below the table and below the header or "nolines". '
'Default: "standard"',
['--table-style'],
{'choices': ['standard', 'booktabs','nolines'], 'default': 'standard',
'metavar': '<format>'}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
('LaTeX font encoding. '
'Possible values are "T1", "OT1", "" or some other fontenc option. '
'The font encoding influences available symbols, e.g. "<<" as one '
'character. Default is "" which leads to package "ae" (a T1 '
'emulation using CM fonts).',
['--font-encoding'],
{'default': ''}),
),)
settings_defaults = {'output_encoding': 'latin-1'}
relative_path_settings = ('stylesheet_path',)
config_section = 'latex2e writer'
config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = LaTeXTranslator
def translate(self):
visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
self.head_prefix = visitor.head_prefix
self.head = visitor.head
self.body_prefix = visitor.body_prefix
self.body = visitor.body
self.body_suffix = visitor.body_suffix
"""
Notes on LaTeX
--------------
* latex does not support multiple tocs in one document.
(might be no limitation except for docutils documentation)
* width
* linewidth - width of a line in the local environment
* textwidth - the width of text on the page
Maybe always use linewidth ?
*Bug* inside a minipage a (e.g. Sidebar) the linewidth is
not changed, needs fix in docutils so that tables
are not too wide.
So we add locallinewidth set it initially and
on entering sidebar and reset on exit.
"""
class Babel:
"""Language specifics for LaTeX."""
# country code by a.schlock.
# partly manually converted from iso and babel stuff, dialects and some
_ISO639_TO_BABEL = {
'no': 'norsk', #XXX added by hand ( forget about nynorsk?)
'gd': 'scottish', #XXX added by hand
'hu': 'magyar', #XXX added by hand
'pt': 'portuguese',#XXX added by hand
'sl': 'slovenian',
'af': 'afrikaans',
'bg': 'bulgarian',
'br': 'breton',
'ca': 'catalan',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'fr': 'french',
# french, francais, canadien, acadian
'de': 'ngerman', #XXX rather than german
| ingve/IncludeOS | test/fs/integration/ide_write/test.py | Python | apache-2.0 | 663 | 0.007541 |
#! /usr/bin/env python
import sys
import os
import subprocess
includeos_src = os.environ.get('INCLUDEOS_SRC',
os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src)
from vmrunner import vmrunner
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def cleanup():
# Call the cleanup script - let python do the printing to get it synced
print subprocess.check_output(["./fat32_disk.sh", "clean"])
# Setup disk
subprocess.call(["./fat32_disk.sh"], shell=True)
# Clean up on exit
vm.on_exit(cleanup)
# Boot the VM
vm.cmake().boot(30).clean()
| gustavofonseca/penne-core | frontdesk/templatetags/frontdesk.py | Python | bsd-2-clause | 1,931 | 0.002071 |
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
STATUS_COLORS = {
'default': 'blue',
'queued': 'blue',
'undetermined': 'blue',
'infected': 'red',
'uninfected': 'green',
'deposited': 'blue',
'rejected': 'red',
'accepted': 'green',
'valid': 'green',
'invalid': 'red',
'undefined': 'blue'
}
BOX_COLORS = {
'blue': 'primary',
'red': 'danger',
'green': 'success',
'grey': 'default'
}
@register.filter
@stringfilter
def status_color(status):
"""
This method will return grey for an unknown status.
"""
return STATUS_COLORS.get(status, 'grey')
@register.filter
def box_color(status):
"""
This method will return grey for an unknown status.
"""
return BOX_COLORS.get(STATUS_COLORS.get(status, 'grey'), 'default')
@register.filter
def status_sps(status):
"""
This method will return valid, invalid or undefined for a given result of
models.PackageMember.sps_validation_status().
status: Tuple(None, {})
status: Tuple(True, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
status: Tuple(False, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
"""
if status[0] is True:
return 'valid'
if status[0] is False:
return 'invalid'
return 'undefined'
@register.filter
def widget_scielops_colors_weight(xmls):
"""
This method will return a color for the SciELO PS widget. The color will
be matched according to the error level of any of the members of the package.
status: Dict with xml's returned by models.Package.xmls().
"""
if len(xmls['invalid']) > 0:
return STATUS_COLORS['invalid']
if len(xmls['undefined']) > 0:
return STATUS_COLORS['undefined']
if len(xmls['valid']) == 0:
return STATUS_COLORS['undefined']
return STATUS_COLORS['valid']
| matus-stehlik/glowing-batman | events/migrations/0002_auto__add_unique_campuserinvitation_camp_user.py | Python | mit | 10,021 | 0.007983 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'CampUserInvitation', fields ['camp', 'user']
db.create_unique(u'events_campuserinvitation', ['camp_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'CampUserInvitation', fields ['camp', 'user']
db.delete_unique(u'events_campuserinvitation', ['camp_id', 'user_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'competitions.competition': {
'Meta': {'ordering': "['name']", 'object_name': 'Competition'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'competitions.season': {
'Meta': {'ordering': "['competition', 'year', 'number']", 'object_name': 'Season'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.camp': {
'Meta': {'ordering': "['-start_time', 'end_time']", 'object_name': 'Camp', '_ormbases': [u'events.Event']},
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['events.Event']", 'unique': 'True', 'primary_key': 'True'}),
'invitation_deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'invited': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['events.CampUserInvitation']", 'symmetrical': 'False'}),
'limit': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'season': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['competitions.Season']", 'null': 'True', 'blank': 'True'})
},
u'events.campuserinvitation': {
'Meta': {'ordering': "(u'_order',)", 'unique_together': "(('user', 'camp'),)", 'object_name': 'CampUserInvitation'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'camp': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['events.Camp']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_as': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'org_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'user_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_accepted_timestamp': ('django.db.models.fields.DateTimeField', [], {})
},
u'events.event': {
'Meta': {'ordering': "['-start_time', 'end_time']", 'object_name': 'Event'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_created'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events_modified'", 'null': 'True', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'registered_org': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'organized_event_set'", 'symmetrical': 'False', 'through': u"orm['events.EventOrgRegistration']", 'to': u"orm['auth.User']"}),
'registered_user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'through': u"orm['events.EventUserRegistration']", 'symmetrical': 'Fal
| aronsky/home-assistant | homeassistant/components/rfxtrx/helpers.py | Python | apache-2.0 | 715 | 0 |
"""Provides helpers for R
|
FXtrx."""
from RFXtrx import get_device
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.typing import HomeAssistantType
@callback
def async_get_device_object(hass: HomeAssistantType, device_id):
"""Get a device for the given device registry id."""
device_registry = dr.async_get(hass)
registry_device = device_registry.async_get(device_id)
if registry_device is None:
raise ValueError(f"Device {device_id} not found")
device_tuple = list(list(registry_device.identifiers)[0])
return get_device(
int(device_tuple[1], 16), int(device_tuple[2], 16), device_tuple[3]
)
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_application_security_groups_operations.py | Python | mit | 24,325 | 0.005015 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations(object):
"""ApplicationSecurityGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
application_security_group_name=application_security_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
application_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApplicationSecurityGroup"
"""Gets information about the specified application security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param application_security_group_name: The name of the application security group.
:type application_securit
| dichen001/Go4Jobs | JoeXu/42. Trapping rain water.py | Python | gpl-3.0 | 616 | 0.027597 |
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
l=len(height)
maxheight=[0 for i in range(l)]
leftmax=0
rightmax=0
res=0
for i in range(l):
if height[i]>leftmax:
leftmax=height[i]
maxheight[i]=leftmax
for i in reversed(range(l)):
if height[i]>rightmax:
rightmax=height[i]
if min(rightmax,maxheight[i])-height[i]>0:
res+=min(rightmax,maxheight[i])-height[i]
return res
| chubbymaggie/miasm | miasm2/analysis/expression_range.py | Python | gpl-2.0 | 2,613 | 0.000383 |
"""Naive range analysis fo
|
r expression"""
from miasm2.analysis.modularintervals import ModularIntervals
_op_range_handler = {
"+": lambda x, y: x + y,
"&": lambda x, y: x & y,
"|": lambda x, y: x | y,
"^": lambda x, y: x ^ y,
"*": lambda x, y: x * y,
">>": lambda x, y: x >> y,
"a>>": lambda x, y: x.arithmetic_shift_right(y),
"<<": lambda x, y: x << y,
">>": lambda x, y: x >> y,
">>>": lambda x, y: x.rotation_right(y),
"<<<": lambda x, y: x.rotation_left(y),
}
def expr_range(expr):
"""Return a ModularIntervals containing the range of possible values of
@expr"""
max_bound = (1 << expr.size) - 1
if expr.is_int():
return ModularIntervals(expr.size, [(int(expr), int(expr))])
elif expr.is_id() or expr.is_mem():
return ModularIntervals(expr.size, [(0, max_bound)])
elif expr.is_slice():
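# XOR of the two all-ones masks leaves exactly bits [start, stop) set.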
interval_mask = ((1 << expr.start) - 1) ^ ((1 << expr.stop) - 1)
arg = expr_range(expr.arg)
# Mask for possible range, and shift range
return ((arg & interval_mask) >> expr.start).size_update(expr.size)
elif expr.is_compose():
sub_ranges = [expr_range(arg) for arg in expr.args]
args_idx = [info[0] for info in expr.iter_args()]
# No shift for the first one
ret = sub_ranges[0].size_update(expr.size)
# Doing it progressively (2 by 2)
for shift, sub_range in zip(args_idx[1:], sub_ranges[1:]):
ret |= sub_range.size_update(expr.size) << shift
return ret
elif expr.is_op():
# A few operation are handled with care
# Otherwise, overapproximate (ie. full range interval)
if expr.op in _op_range_handler:
sub_ranges = [expr_range(arg) for arg in expr.args]
return reduce(_op_range_handler[expr.op],
(sub_range for sub_range in sub_ranges[1:]),
sub_ranges[0])
elif expr.op == "-":
assert len(expr.args) == 1
return - expr_range(expr.args[0])
elif expr.op == "%":
assert len(expr.args) == 2
op, mod = [expr_range(arg) for arg in expr.args]
if mod.intervals.length == 1:
# Modulo intervals is not supported
return op % mod.intervals.hull()[0]
# Operand not handled, return the full domain
return ModularIntervals(expr.size, [(0, max_bound)])
elif expr.is_cond():
return expr_range(expr.src1).union(expr_range(expr.src2))
else:
raise TypeError("Unsupported type: %s" % expr.__class__)
| CVML/winpython | winpython/py3compat.py | Python | mit | 6,585 | 0.003949 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2013 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
spyderlib.py3compat
-------------------
Transitional module providing compatibility functions intended to help
migrating from Python 2 to Python 3.
This module should be fully compatible with:
* Python >=v2.6
* Python 3
"""
from __future__ import print_function
import sys
import os
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
#==============================================================================
# Data types
#==============================================================================
if PY2:
# Python 2
TEXT_TYPES = (str, unicode)
INT_TYPES = (int, long)
else:
# Python 3
TEXT_TYPES = (str,)
INT_TYPES = (int,)
NUMERIC_TYPES = tuple(list(INT_TYPES) + [float, complex])
#==============================================================================
# Renamed/Reorganized modules
#==============================================================================
if PY2:
# Python 2
import __builtin__ as builtins
import ConfigParser as configparser
try:
import _winreg as winreg
except ImportError:
pass
from sys import maxint as maxsize
try:
import CStringIO as io
except ImportError:
import StringIO as io
try:
import cPickle as pickle
except ImportError:
import pickle
from UserDict import DictMixin as MutableMapping
import thread as _thread
import repr as reprlib
else:
# Python 3
import builtins
import configparser
try:
import winreg
except ImportError:
pass
from sys import maxsize
import io
import pickle
from collections import MutableMapping
import _thread
import reprlib
#==============================================================================
# Strings
#==============================================================================
if PY2:
# Python 2
import codecs
def u(obj):
"""Make unicode object"""
return codecs.unicode_escape_decode(obj)[0]
else:
# Python 3
def u(obj):
"""Return string as it is"""
return obj
def is_text_string(obj):
"""Return True if `obj` is a text string, False if it is anything else,
like binary data (Python 3) or QString (Python 2, PyQt API #1)"""
if PY2:
# Python 2
return isinstance(obj, basestring)
else:
# Python 3
return isinstance(obj, str)
def is_binary_string(obj):
"""Return True if `obj` is a binary string, False if it is anything else"""
if PY2:
# Python 2
return isinstance(obj, str)
else:
# Python 3
return isinstance(obj, bytes)
def is_string(obj):
"""Return True if `obj` is a text or binary Python string object,
False if it is anything else, like a QString (Python 2, PyQt API #1)"""
return is_text_string(obj) or is_binary_string(obj)
def is_unicode(obj):
"""Return True if `obj` is unicode"""
if PY2:
# Python 2
return isinstance(obj, unicode)
else:
# Python 3
return isinstance(obj, str)
def to_text_string(obj, encoding=None):
"""Convert `obj` to (unicode) text string"""
if PY2:
# Python 2
if encoding is None:
return unicode(obj)
else:
return unicode(obj, encoding)
else:
# Python 3
if encoding is None:
return str(obj)
elif isinstance(obj, str):
# In case this function is not used properly, this could happen
return obj
else:
return str(obj, encoding)
def to_binary_string(obj, encoding=None):
"""Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
if PY2:
# Python 2
if encoding is None:
return str(obj)
else:
return obj.encode(encoding)
else:
# Python 3
return bytes(obj, 'utf-8' if encoding is None else encoding)
#==============================================================================
# Function attributes
#==============================================================================
def get_func_code(func):
"""Return function code object"""
if PY2:
# Python 2
return func.func_code
else:
# Python 3
return func.__code__
def get_func_name(func):
"""Return function name"""
if PY2:
# Python 2
return func.func_name
else:
# Python 3
return func.__name__
def get_func_defaults(func):
"""Return function default argument values"""
if PY2:
# Python 2
return func.func_defaults
else:
# Python 3
return func.__defaults__
#==============================================================================
# Special method attributes
#==============================================================================
def get_meth_func(obj):
"""Return method f
|
unction object"""
if PY2:
# Python 2
return obj.im_func
else:
# Python 3
return obj.__func__
def get_meth_class_inst(obj):
"""Return method class instance"""
if PY2:
# Python 2
return obj.im_self
else:
# Python 3
return obj.__self__
def get_meth_class(obj):
"""Return method class"""
if PY2:
# Python 2
return obj.im_class
else:
# Python 3
return obj.__self__.__class__
#==============================================================================
# Misc.
#==============================================================================
if PY2:
# Python 2
input = raw_input
getcwd = os.getcwdu
cmp = cmp
import string
str_lower = string.lower
from itertools import izip_longest as zip_longest
else:
# Python 3
input = input
getcwd = os.getcwd
def cmp(a, b):
return (a > b) - (a < b)
str_lower = str.lower
from itertools import zip_longest
def qbytearray_to_str(qba):
"""Convert QByteArray object to str in a way compatible with Python 2/3"""
return str(bytes(qba.toHex().data()).decode())
if __name__ == '__main__':
pass
| b3c/VTK-5.8 | Wrapping/Python/vtk/__helper.py | Python | bsd-3-clause | 981 | 0.008155 |
""" This provides some useful code used by other modules. This is not to be
used by the end user which is why it is hidden. """
import string, sys
class LinkError(Exception):
pass
def refine_import_err(mod_name, extension_name, exc):
""" Checks to see if the ImportError was because the library
itself was not there or because there was a link error. If there
was a link error it raises a LinkError if not it does nothing.
Keyword arguments
-----------------
- mod_name : The name of the Python module that was imported.
- extension_name : The name of the extension module that is to be
imported by the module having mod_name.
- exc : The exception raised when the module called mod_name was
imported.
To see example usage look at __init__.py.
"""
try:
del sys.modules['vtk.%s'%mod_name]
except KeyError:
pass
if string.find(str(exc), extension_name) == -1:
raise LinkError, str(exc)
| laborautonomo/youtube-dl | youtube_dl/utils.py | Python | unlicense | 42,818 | 0.002382 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
import contextlib
import ctypes
import datetime
import email.utils
import errno
import getpass
import gzip
import itertools
import io
import json
import locale
import math
import os
import pipes
import platform
import re
import ssl
import socket
import struct
import subprocess
import sys
import traceback
import xml.etree.ElementTree
import zlib
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = _unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = _unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
def compat_ord(c):
if type(c) is int: return c
else: return ord(c)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
u'TEST'.encode(pref)
except:
pref = 'UTF-8'
return pref
if sys.version_info < (3,0):
def compat_print(s):
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3,0):
def write_json_file(obj, fn):
with open(fn, 'wb') as f:
json.dump(obj, f)
else:
def write_json_file(obj, fn):
with open(fn, 'w', encoding='utf-8') as f:
json.dump(obj, f)
if sys.version_info >= (2,7):
def find_xpath_attr(node, xpath, key, val):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z]+$', key)
assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val)
expr = xpath + u"[@%s='%s']" % (key, val)
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val):
for f in node.findall(xpath):
if f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
def htmlentity_transform(matchobj):
"""Transforms an HTML entity to a character.
This function receives a match object and is intended to be used with
the re.sub() function.
"""
entity = matchobj.group(1)
# Known non-
| Alexis-benoist/CaTeX | tests/test_core.py | Python | apache-2.0 | 1,434 | 0.001395 |
# -*- coding: utf-8 -*-
from click import open_file
def read_file(path):
with open_file(path, 'r', encoding='utf8') as f:
return ''.join(f.readlines())
def test_import():
from catex import LaTeX
def test_import_():
import catex
def test_latex_simple():
from catex import LaTeX
f1 = LaTeX.from_file("tests/data/latex1.tex")
f1.merge(f1)
def test_merge():
from catex.core import merge
rv = merge("tests/data/latex1.tex",
"tests/data/latex2.tex")
expected_result = read_file("tests/data/merge1_2.tex")
assert rv.__repr__() == expected_result
def test_merge_packeges():
from catex.core import merge_packages
pkg1 = [
['th', ['mou', 'moi', 'mumu=tutu']],
['blo', []],
['bli', ['tut']],
['bli', []],
['bleh', []],
['bla', []]]
pkg2 = [
['th', ['mou', 'moi', 'mumu=tutu']],
['blo', []],
['bli', ['tut']],
['bli', []],
['bleh', []],
['bla', []]
]
pkg_rv = [
['th', ['mumu=tutu', 'mou', 'moi']],
['blo', []],
['bli', ['tut']],
['bli', ['tut']], ['bleh', []],
['bla', []]
]
assert merge_packages(pkg1, pkg2) == pkg_rv
def test_repr():
from catex.core import LaTeX
l = LaTeX.from_file("tests/data/latex_sorted.tex")
text = read_file("tests/data/latex_sorted.tex")
assert l.__repr__() == ''.join(text)
| codeforamerica/mdc-feedback | feedback/surveys/views.py | Python | mit | 1,698 | 0.002356 |
# -*- coding: utf-8 -*-
# DO NOT DELETE
import StringIO
import csv
import datetime
today = datetime.date.today()
from flask import (
Blueprint,
make_response
)
from flask.ext.login import login_required
from sqlalchemy import desc
from feedback.surveys.models import Survey
blueprint = Blueprint(
'surveys',
__name__,
url_prefix='/surveys',
static_folder="../static")
@blueprint.route('/download')
@login_required
def to_csv():
csvList = []
csvList.append([
'date_submitted',
'method',
'language',
'route',
'rating',
'role',
'get_done',
'purpose',
'best',
'worst',
'improvement',
'follow_up',
'contact',
'more_comments'])
survey_models = Survey.query.order_by(desc(Survey.date_submitted)).all()
for survey_model in survey_models:
csvList.append([
survey_model.date_submitted,
survey_model.method,
survey_model.lang,
survey_model.route_en,
survey_model.rating,
survey_model.role_en,
survey_model.get_done,
survey_model.purpose_en,
survey_model.best_en,
survey_model.worst_en,
survey_model.improvement,
survey_model.follow_up,
survey_model.contact,
survey_model.more_comments])
strIO = StringIO.StringIO()
writer = csv.writer(strIO)
writer.writerows(csvList)
output = make_response(strIO.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=export.csv"
output.headers["Content-type"] = "text/csv"
return output
| ytaben/cyphesis | rulesets/mason/world/tasks/Logging.py | Python | gpl-2.0 | 4,546 | 0.009899 |
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2005 Al Riddoch (See the file COPYING for details).
from atlas import *
from physics import *
from physics import Quaternion
from physics import Vector3D
import math
import sys
from random import *
import server
class Logging(server.Task):
""" A proof of concept task for logging."""
def cut_operation(self, op):
""" Op handler for cut op which activates this task """
# print "Logging.cut"
if len(op) < 1:
sys.stderr.write("Logging task has no target in cut op")
self.target = server.world.get_object_ref(op[0].id)
self.tool = op.to
def tick_operation(self, op):
""" Op handler for regular tick op """
# print "Logging.tick"
if self.target() is None:
# print "Target is no more"
self.irrelevant()
return
current_status = self.target().status
#Measure the distance between the entity horizontal edges. Else we won't be able to reach if either entity is too thick.
distance_between_entity_edges_squared = square_horizontal_edge_distance(self.character.location, self.target().location)
#Assume that a standard human can reach 1.5 meters, and use this to determine if we're close enough to be able to perform the logging
standard_human_reach_squared=1.5*1.5
if distance_between_entity_edges_squared > standard_human_reach_squared:
self.progress = 1 - current_status
self.rate = 0
return self.next_tick(1.75)
res=Oplist()
if current_status > 0.11:
set=Operation("set", Entity(self.target().id, status=current_status-0.1), to=self.target())
res.append(set)
# print "CHOP",current_status
normal=Vector3D(0,0,1)
# print "LOC.ori ", self.target().location.orientation
# calculate how tilted the tree is already
if self.target().location.orientation.is_valid():
normal.rotate(self.target().location.orientation)
# print "Normal ", normal, normal.dot(Vector3D(0,0,1))
# if the tree is standing, and it's already half cut down, rotate
# it to be horizontal, away from the character
if normal.dot(Vector3D(0,0,1)) > 0.8 and current_status < 0.5:
# print "Fall down"
# determine the axis of rotation by cross product of the vector
            # from character to tree, and vertically upward vector
axis = distance_to(self.character.location,
self.target().location).cross(Vector3D(0,0,1))
# the axis must be a unit vector
try:
axis = axis.unit_vector()
except ZeroDivisionError:
axis = Vector3D(1,0,0)
# print "axis ", axis
# create a rotation of 90 degrees around this axis
orient = Quaternion(axis, math.pi / -2.0)
# if the tree is rotated, apply this too
if self.target().location.orientation.is_valid():
orient = self.target().location.orientation * orient
move_location = self.target().location.copy()
move_location.orientation = orient
move = Operation("move", Entity(self.target().id, mode='felled',
location=move_location),
to = self.target())
res.append(move)
else:
# print "become log"
set = Operation("set", Entity(self.target().id, status = -1),
to = self.target())
res.append(set)
create_loc = self.target().location.copy()
create_loc.orientation = self.target().location.orientation
create = Operation("create",
Entity(parents = ["lumber"],
mass = self.target().mass,
location = create_loc,
bbox = self.target().bbox),
to = self.target())
res.append(create)
self.progress = 1 - current_status
self.rate = 0.1 / 1.75
res.append(self.next_tick(1.75))
return res
|
lixiangning888/whole_project
|
modules/signatures_orignal/rat_spynet.py
|
Python
|
lgpl-3.0
| 1,986
| 0.006546
|
# Copyright (C) 2014 @threatlead
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class SpynetRat(Signature):
name = "rat_spynet"
description = "Creates known SpyNet mutexes and/or registry changes."
severity = 3
    categories = ["rat"]
families = ["spynet"]
authors = ["threatlead", "nex"]
references = [
"https://malwr.com/analysis/ZDQ1NjBhNWIzNTdkNDRhNjhkZTFmZTBkYTU2YjMwNzg/",
"https://malwr.com/analysis/MjkxYmE2YzczNzcwNGJiZjljNDcwMzA2ZDkyNDU2Y2M/",
"https://malwr.com/analysis/N2E3NWRiNDMyYjIwNGE0NTk3Y2E5NWMzN2UwZTVjMzI/",
"https://malwr.com/analysis/N2Q2NWY0Y2MzOTM0NDEzNmE1MTdhOThiNTQxMzhiNzk/"
]
minimum = "1.2"
def run(self):
indicators = [
".*CYBERGATEUPDATE",
".*\(\(SpyNet\)\).*",
".*Spy-Net.*",
".*X_PASSWORDLIST_X.*",
".*X_BLOCKMOUSE_X.*",
#".*PERSIST", # Causes false positive detection on XtremeRAT samples.
".*_SAIR",
]
for indicator in indicators:
if self.check_mutex(pattern=indicator, regex=True):
return True
keys = [
".*\\SpyNet\\.*",
]
for key in keys:
if self.check_write_key(pattern=key, regex=True):
return True
return False
|
michealcarrerweb/LHVent_app
|
operation_finance/migrations/0021_auto_20170712_0222.py
|
Python
|
mit
| 519
| 0.001927
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-12 02:22
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operation_finance', '0020_auto_20170711_1429'),
]
operations = [
migrations.AlterField(
model_name='invoice',
name='due_by',
            field=models.DateField(default=datetime.datetime(2017, 8, 6, 2, 22, 37, 974278)),
),
]
|
alexgibson/bedrock
|
bedrock/firefox/redirects.py
|
Python
|
mpl-2.0
| 32,590
| 0.005247
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from bedrock.redirects.util import no_redirect, platform_redirector, redirect
def firefox_mobile_faq(request, *args, **kwargs):
qs = request.META.get("QUERY_STRING", "")
if "os=firefox-os" in qs:
return "https://support.mozilla.org/products/firefox-os"
return "https://support.mozilla.org/products/mobile"
def firefox_channel(*args, **kwargs):
return platform_redirector("firefox.channel.desktop", "firefox.channel.android", "firefox.channel.ios")
redirectpatterns = (
# overrides
# issue 8096
redirect(r"^firefox/beta/all/?$", "firefox.all", anchor="product-desktop-beta"),
redirect(r"^firefox/developer/all/?$", "firefox.all", anchor="product-desktop-developer"),
redirect(r"^firefox/aurora/all/?$", "firefox.all", anchor="product-desktop-developer"),
redirect(r"^firefox/nightly/all/?$", "firefox.all", anchor="product-desktop-nightly"),
redirect(r"^firefox/organizations/all/?$", "firefox.all", anchor="product-desktop-esr"),
redirect(r"^firefox/android/all/?$", "firefox.all", anchor="product-android-release"),
redirect(r"^firefox/android/beta/all/?$", "firefox.all", anchor="product-android-beta"),
redirect(r"^firefox/android/nightly/all/?$", "firefox.all", anchor="product-android-nightly"),
# bug 831810 & 1142583 & 1239960, 1329931
redirect(r"^mwc/?$", "https://support.mozilla.org/products/firefox-os", re_flags="i"),
# bug 748503
redirect(r"^projects/firefox/[^/]+a[0-9]+/firstrun(?P<p>.*)$", "/firefox/nightly/firstrun{p}"),
# bug 1275483
redirect(r"^firefox/nightly/whatsnew/?", "f
|
irefox.nightly.firstrun"),
# bug 840814
redirect(
r"^projects/firefox"
r"(?P<version>/(?:\d+\.\d+\.?(?:\d+)?\.?(?:\d+)?(?:[a|b]?)(?:\d*)(?:pre)?(?:\d)?))"
r"(?P<page>/(?:firstrun|whatsnew))"
r"(?P<res
|
t>/.*)?$",
"/firefox{version}{page}{rest}",
),
# bug 877165
redirect(r"^firefox/connect", "mozorg.home"),
# bug 657049, 1238851
redirect(r"^firefox/accountmanager/?$", "https://developer.mozilla.org/Persona"),
# Bug 1009247, 1101220, 1299947, 1314603, 1328409
redirect(r"^(firefox/)?beta/?$", firefox_channel(), cache_timeout=0, anchor="beta"),
redirect(r"^(firefox/)?aurora/?$", firefox_channel(), cache_timeout=0, anchor="aurora"),
redirect(r"^(firefox/)?nightly/?$", firefox_channel(), cache_timeout=0, anchor="nightly"),
redirect(r"^mobile/beta/?$", "firefox.channel.android", anchor="beta"),
redirect(r"^mobile/aurora/?$", "firefox.channel.android", anchor="aurora"),
redirect(r"^mobile/nightly/?$", "firefox.channel.android", anchor="nightly"),
# bug 988044
redirect(r"^firefox/unsupported-systems\.html$", "firefox.unsupported-systems"),
# bug 736934, 860865, 1101220, 1153351
redirect(r"^mobile/notes/?$", "/firefox/android/notes/"),
redirect(r"^mobile/(?P<channel>(beta|aurora))/notes/?$", "/firefox/android/{channel}/notes/"),
redirect(r"^firefox/system-requirements(\.html)?$", "/firefox/system-requirements/"),
redirect(r"^firefox/(?P<channel>(beta|aurora|organizations))/system-requirements(\.html)?$", "/firefox/{channel}/system-requirements/"),
# bug 1155870
redirect(r"^firefox/os/(releases|notes)/?$", "https://developer.mozilla.org/Firefox_OS/Releases"),
redirect(r"^firefox/os/(?:release)?notes/(?P<v>[^/]+)/?$", "https://developer.mozilla.org/Firefox_OS/Releases/{v}"),
# bug 878871
redirect(r"^firefoxos", "/firefox/os/"),
# bug 1438302
no_redirect(r"^firefox/download/thanks/?$"),
# Bug 1006616
redirect(r"^download/?$", "firefox.new"),
# Bug 1409554
redirect(r"^(firefox|mobile)/download", "firefox.new"),
# bug 837883
redirect(r"^firefox/firefox\.exe$", "mozorg.home", re_flags="i"),
# bug 821006
redirect(r"^firefox/all(\.html)?$", "firefox.all"),
# bug 727561
redirect(r"^firefox/search(?:\.html)?$", "firefox.new"),
# bug 860865, 1101220, issue 8096
redirect(r"^firefox/all-(?:beta|rc)(?:/|\.html)?$", "firefox.all", anchor="product-desktop-beta"),
redirect(r"^firefox/all-aurora(?:/|\.html)?$", "firefox.all", anchor="product-desktop-developer"),
redirect(r"^firefox/aurora/(?P<page>all|notes|system-requirements)/?$", "/firefox/developer/{page}/"),
redirect(r"^firefox/organizations/all\.html$", "firefox.all", anchor="product-desktop-esr"),
# bug 729329
redirect(r"^mobile/sync", "firefox.sync"),
# bug 882845
redirect(r"^firefox/toolkit/download-to-your-devices", "firefox.new"),
# bug 1014823
redirect(r"^(products/)?firefox/releases/whatsnew/?$", "firefox.whatsnew"),
# bug 929775
redirect(
r"^firefox/update",
"firefox.new",
query={
"utm_source": "firefox-browser",
"utm_medium": "firefox-browser",
"utm_campaign": "firefox-update-redirect",
},
),
# Bug 868182, 986174
redirect(r"^(m|(firefox/)?mobile)/features/?$", "firefox.browsers.mobile.index"),
redirect(r"^(m|(firefox/)?mobile)/faq/?$", firefox_mobile_faq, query=False),
# bug 884933
redirect(r"^(m|(firefox/)?mobile)/platforms/?$", "https://support.mozilla.org/kb/will-firefox-work-my-mobile-device"),
redirect(r"^m/?$", "firefox.new"),
# Bug 730488 deprecate /firefox/all-older.html
redirect(r"^firefox/all-older\.html$", "firefox.new"),
# bug 1120658
redirect(r"^seamonkey-transition\.html$", "http://www-archive.mozilla.org/seamonkey-transition.html"),
# Bug 1186373
redirect(r"^firefox/hello/npssurvey/?$", "https://www.surveygizmo.com/s3/2227372/Firefox-Hello-Product-Survey", permanent=False),
# Bug 1221739
redirect(r"^firefox/hello/feedbacksurvey/?$", "https://www.surveygizmo.com/s3/2319863/d2b7dc4b5687", permanent=False),
# bug 1148127
redirect(r"^products/?$", "firefox"),
# Bug 1110927
redirect(r"^(products/)?firefox/start/central\.html$", "firefox.new"),
redirect(r"^firefox/sync/firstrun\.html$", "firefox.sync"),
# Bug 920212
redirect(r"^firefox/fx(/.*)?", "firefox"),
# Bug 979531, 1003727, 979664, 979654, 979660
redirect(r"^firefox/customize/?$", "https://support.mozilla.org/kb/customize-firefox-controls-buttons-and-toolbars"),
redirect(r"^firefox/(?:performance|happy|speed|memory)/?$", "firefox.features.fast"),
redirect(r"^firefox/security/?$", "firefox.features.independent"),
redirect(r"^firefox/technology/?$", "https://developer.mozilla.org/docs/Tools"),
# Previously Bug 979527 / Github #10004 "Getting Started" Page
redirect(r"^(products/)?firefox/central(/|\.html|-lite\.html)?$", "firefox"),
# bug 868169
redirect(r"^mobile/android-download\.html$", "https://play.google.com/store/apps/details", query={"id": "org.mozilla.firefox"}, merge_query=True),
redirect(
r"^mobile/android-download-beta\.html$",
"https://play.google.com/store/apps/details",
query={"id": "org.mozilla.firefox_beta"},
merge_query=True,
),
# bug 675031
redirect(
r"^projects/fennec(?P<page>/[\/\w\.-]+)?", "http://website-archive.mozilla.org/www.mozilla.org/fennec_releasenotes/projects/fennec{page}"
),
# bug 876581
redirect(r"^firefox/phishing-protection(/?)$", "https://support.mozilla.org/kb/how-does-phishing-and-malware-protection-work"),
# bug 1006079
redirect(r"^mobile/home/?(?:index\.html)?$", "https://blog.mozilla.org/services/2012/08/31/retiring-firefox-home/"),
# bug 949562
redirect(
r"^mobile/home/1\.0/releasenotes(?:/(?:index\.html)?)?$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0/releasenotes/",
),
redirect(
r"^mobile/home/1\.0\.2/releasenotes(?:/(?:index\.html)?)?$",
"http://website-archive.mozilla.org/www.mozilla.org/firefox_home/mobile/home/1.0.2/releasenotes/",
),
redirect(r"^mobile/home/faq(?:/(?:index\.html)?)?$", "http://website-archive.mozilla.org/www.
|
vendelin8/serverApplet
|
plugin/__init__.py
|
Python
|
gpl-2.0
| 1,584
| 0.004422
|
# -*- coding: utf-8 -*-
#
# Plugins' module file for serverApplet.
# Copyright (C) 2015 Gergely Bódi
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
globalPluginFunctions = ['init', # initializes the plugin with the app object
                         'load', # loads an account with that plugin
'testLogin', # tests an account after creating it
'startCron', # does cron job functionality for the given login
'nextCron', # returns the time for the next cron job running time for the given login
'getParams'] # returns the params dict for the given login
def localPluginFunctions(modulename):
'''Returns additional plugin functions for the plugin as a dict: {"system tray menu label": "plugin function name"}'''
return __import__('plugin.{}'.format(modulename), fromlist=['actions']).actions
|
oomlout/oomlout-OOMP
|
old/OOMPpart_CAPC_0603_X_PF100_V50.py
|
Python
|
cc0-1.0
| 245
| 0
|
import OOMP
newPart = OOMP.oompItem(8826)
newPart.addTag("oompType", "CAPC")
newPart.addTag("oompSize", "0603")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "PF100")
newPart.addTag("oompIndex", "V50")
OOMP.parts.append(newPart)
|
home-assistant/home-assistant
|
homeassistant/components/hue/v1/sensor_base.py
|
Python
|
apache-2.0
| 7,575
| 0.001056
|
"""Support for the Philips Hue sensors as a platform."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
from aiohue import AiohueException, Unauthorized
from aiohue.v1.sensors import TYPE_ZLL_PRESENCE
import async_timeout
from homeassistant.components.sensor import SensorStateClass
from homeassistant.core import callback
from homeassistant.helpers import debounce, entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from ..const import REQUEST_REFRESH_DELAY
from .helpers import remove_devices
from .hue_event import EVENT_CONFIG_MAP
from .sensor_device import GenericHueDevice
SENSOR_CONFIG_MAP: dict[str, Any] = {}
LOGGER = logging.getLogger(__name__)
def _device_id(aiohue_sensor):
# Work out the shared device ID, as described below
device_id = aiohue_sensor.uniqueid
if device_id and len(device_id) > 23:
device_id = device_id[:23]
return device_id
class SensorManager:
"""Class that handles registering and updating Hue sensor entities.
Intended to be a singleton.
"""
SCAN_INTERVAL = timedelta(seconds=5)
def __init__(self, bridge):
"""Initialize the sensor manager."""
self.bridge = bridge
self._component_add_entities = {}
self.current = {}
self.current_events = {}
self._enabled_platforms = ("binary_sensor", "sensor")
self.coordinator = DataUpdateCoordinator(
bridge.hass,
LOGGER,
name="sensor",
update_method=self.async_update_data,
update_interval=self.SCAN_INTERVAL,
request_refresh_debouncer=debounce.Debouncer(
bridge.hass, LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
async def async_update_data(self):
"""Update sensor data."""
try:
async with async_timeout.timeout(4):
return await self.bridge.async_request_call(
self.bridge.api.sensors.update
)
except Unauthorized as err:
await self.bridge.handle_unauthorized_error()
raise UpdateFailed("Unauthorized") from err
except AiohueException as err:
raise UpdateFailed(f"Hue error: {err}") from err
async def async_register_component(self, platform, async_add_entities):
"""Register async_add_entities methods for components."""
self._component_add_entities[platform] = async_add_entities
if len(self._component_add_entities) < len(self._enabled_platforms):
LOGGER.debug("Aborting start with %s, waiting for the rest", platform)
return
# We have all components available, start the updating.
self.bridge.reset_jobs.append(
            self.coordinator.async_add_listener(self.async_update_items)
)
await self.coordinator.async_refresh()
@callback
    def async_update_items(self):
"""Update sensors from the bridge."""
api = self.bridge.api.sensors
if len(self._component_add_entities) < len(self._enabled_platforms):
return
to_add = {}
primary_sensor_devices = {}
current = self.current
# Physical Hue motion sensors present as three sensors in the API: a
# presence sensor, a temperature sensor, and a light level sensor. Of
# these, only the presence sensor is assigned the user-friendly name
# that the user has given to the device. Each of these sensors is
# linked by a common device_id, which is the first twenty-three
# characters of the unique id (then followed by a hyphen and an ID
# specific to the individual sensor).
#
# To set up neat values, and assign the sensor entities to the same
# device, we first, iterate over all the sensors and find the Hue
# presence sensors, then iterate over all the remaining sensors -
# finding the remaining ones that may or may not be related to the
# presence sensors.
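        # Illustrative sketch of the ID sharing described above (hypothetical
        # uniqueid values, not taken from a real bridge): the three logical
        # sensors of one physical device differ only after character 23, so
        #   "00:17:88:01:02:0a:0b:0c-02-0406"[:23]
        #   "00:17:88:01:02:0a:0b:0c-02-0402"[:23]
        # both yield "00:17:88:01:02:0a:0b:0c", and _device_id() therefore
        # maps them to the same key.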
for item_id in api:
if api[item_id].type != TYPE_ZLL_PRESENCE:
continue
primary_sensor_devices[_device_id(api[item_id])] = api[item_id]
# Iterate again now we have all the presence sensors, and add the
# related sensors with nice names where appropriate.
for item_id in api:
uniqueid = api[item_id].uniqueid
if current.get(uniqueid, self.current_events.get(uniqueid)) is not None:
continue
sensor_type = api[item_id].type
# Check for event generator devices
event_config = EVENT_CONFIG_MAP.get(sensor_type)
if event_config is not None:
base_name = api[item_id].name
name = event_config["name_format"].format(base_name)
new_event = event_config["class"](api[item_id], name, self.bridge)
self.bridge.hass.async_create_task(
new_event.async_update_device_registry()
)
self.current_events[uniqueid] = new_event
sensor_config = SENSOR_CONFIG_MAP.get(sensor_type)
if sensor_config is None:
continue
base_name = api[item_id].name
primary_sensor = primary_sensor_devices.get(_device_id(api[item_id]))
if primary_sensor is not None:
base_name = primary_sensor.name
name = sensor_config["name_format"].format(base_name)
current[uniqueid] = sensor_config["class"](
api[item_id], name, self.bridge, primary_sensor=primary_sensor
)
to_add.setdefault(sensor_config["platform"], []).append(current[uniqueid])
self.bridge.hass.async_create_task(
remove_devices(
self.bridge,
[value.uniqueid for value in api.values()],
current,
)
)
for platform, value in to_add.items():
self._component_add_entities[platform](value)
class GenericHueSensor(GenericHueDevice, entity.Entity):
"""Representation of a Hue sensor."""
should_poll = False
@property
def available(self):
"""Return if sensor is available."""
return self.bridge.sensor_manager.coordinator.last_update_success and (
self.allow_unreachable
# remotes like Hue Tap (ZGPSwitchSensor) have no _reachability_
or self.sensor.config.get("reachable", True)
)
@property
def state_class(self):
"""Return the state class of this entity, from STATE_CLASSES, if any."""
return SensorStateClass.MEASUREMENT
async def async_added_to_hass(self):
"""When entity is added to hass."""
await super().async_added_to_hass()
self.async_on_remove(
self.bridge.sensor_manager.coordinator.async_add_listener(
self.async_write_ha_state
)
)
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self.bridge.sensor_manager.coordinator.async_request_refresh()
class GenericZLLSensor(GenericHueSensor):
"""Representation of a Hue-brand, physical sensor."""
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return {"battery_level": self.sensor.battery}
|
ticosax/django-rest-framework
|
tests/test_fields.py
|
Python
|
bsd-2-clause
| 38,463
| 0.00117
|
from decimal import Decimal
from django.utils import timezone
from rest_framework import serializers
import rest_framework
import datetime
import django
import pytest
import uuid
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
"""
Tests for `required`, `allow_null`, `allow_blank`, `default`.
"""
def test_required(self):
"""
By default a field must be included in the input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation()
assert exc_info.value.detail == ['This field is required.']
def test_not_required(self):
"""
If `required=False` then a field may be omitted from the input.
"""
field = serializers.IntegerField(required=False)
with pytest.raises(serializers.SkipField):
field.run_validation()
def test_disallow_null(self):
"""
By default `None` is not a valid input.
"""
field = serializers.IntegerField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation(None)
assert exc_info.value.detail == ['This field may not be null.']
def test_allow_null(self):
"""
If `allow_null=True` then `None` is a valid input.
"""
field = serializers.IntegerField(allow_null=True)
output = field.run_validation(None)
assert output is None
def test_disallow_blank(self):
"""
By default '' is not a valid input.
"""
field = serializers.CharField()
with pytest.raises(serializers.ValidationError) as exc_info:
field.run_validation('')
assert exc_info.value.detail == ['This field may not be blank.']
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.CharField(allow_blank=True)
output = field.run_validation('')
assert output == ''
def test_default(self):
"""
        If `default` is set, then omitted values get the default input.
"""
field = serializers.IntegerField(default=123)
        output = field.run_validation()
assert output is 123
class TestSource:
def test_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='other')
serializer = ExampleSerializer(data={'example_field': 'abc'})
assert serializer.is_valid()
assert serializer.validated_data == {'other': 'abc'}
def test_redundant_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `source='example_field'` on field "
"'CharField' in serializer 'ExampleSerializer', because it is the "
"same as the field name. Remove the `source` keyword argument."
)
def test_callable_source(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable')
class ExampleInstance(object):
def example_callable(self):
return 'example callable value'
serializer = ExampleSerializer(ExampleInstance())
assert serializer.data['example_field'] == 'example callable value'
def test_callable_source_raises(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.CharField(source='example_callable', read_only=True)
class ExampleInstance(object):
def example_callable(self):
raise AttributeError('method call failed')
with pytest.raises(ValueError) as exc_info:
serializer = ExampleSerializer(ExampleInstance())
serializer.data.items()
assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
read_only = serializers.ReadOnlyField()
writable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_read_only(self):
"""
        Read-only fields should not be included in validation.
"""
data = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'writable': 456}
def test_serialize_read_only(self):
"""
        Read-only fields should be serialized.
"""
instance = {'read_only': 123, 'writable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
def setup(self):
class TestSerializer(serializers.Serializer):
write_only = serializers.IntegerField(write_only=True)
readable = serializers.IntegerField()
self.Serializer = TestSerializer
def test_validate_write_only(self):
"""
        Write-only fields should be included in validation.
"""
data = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'write_only': 123, 'readable': 456}
def test_serialize_write_only(self):
"""
        Write-only fields should not be serialized.
"""
instance = {'write_only': 123, 'readable': 456}
serializer = self.Serializer(instance)
assert serializer.data == {'readable': 456}
class TestInitial:
def setup(self):
class TestSerializer(serializers.Serializer):
initial_field = serializers.IntegerField(initial=123)
blank_field = serializers.IntegerField()
self.serializer = TestSerializer()
def test_initial(self):
"""
Initial values should be included when serializing a new representation.
"""
assert self.serializer.data == {
'initial_field': 123,
'blank_field': None
}
class TestLabel:
def setup(self):
class TestSerializer(serializers.Serializer):
labeled = serializers.IntegerField(label='My label')
self.serializer = TestSerializer()
def test_label(self):
"""
A field's label may be set with the `label` argument.
"""
fields = self.serializer.fields
assert fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
def setup(self):
class ExampleField(serializers.Field):
def to_native(self, data):
self.fail('incorrect')
self.field = ExampleField()
def test_invalid_error_key(self):
"""
If a field raises a validation error, but does not have a corresponding
error message, then raise an appropriate assertion error.
"""
with pytest.raises(AssertionError) as exc_info:
self.field.to_native(123)
expected = (
'ValidationError raised by `ExampleField`, but error key '
'`incorrect` does not exist in the `error_messages` dictionary.'
)
assert str(exc_info.value) == expected
class MockHTMLDict(dict):
"""
This class mocks up a dictionary like object, that behaves
as if it was returned for multipart or urlencoded data.
"""
getlist = None
class TestBooleanHTMLInput:
def setup(self):
class TestSerializer(serializers.Serializer):
archived = serializers.BooleanField()
self.Serializer = TestSerializer
def test_empty_html_checkbox(self):
"""
HTML checkboxes do not send any value, but should be treated
as `False` by Boolean
|
brad/swftools
|
spec/gradients.py
|
Python
|
gpl-2.0
| 3,650
| 0.019452
|
from sys import *
from pdflib_py import *
p = PDF_new()
PDF_open_file(p, "gradients.pdf")
PDF_set_parameter(p, "usercoordinates", "true")
PDF_set_value(p, "compress", 0)
PDF_set_info(p, "Author", "pdflib")
PDF_set_info(p, "Creator", "pdflib_py")
PDF_set_info(p, "Title", "gradients")
width = 1024
height = 800
PDF_begin_page(p, width, height)
type,x,params = "radial",0,"r0=0 r1=320"
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.0, 1.0)
shading = PDF_shading(p, type, 160+x,160+y, 160+x, 160+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_fill(p)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_stroke(p)
type,x,params = "axial",200,""
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 0+x,0+y, 320+x,320+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_fill(p)
PDF_moveto(p, x,y)
PDF_curveto(p, x+80, y+80, x+80, y+240, x, y+320)
PDF_curveto(p, x+80, y+240, x+240, y+240, x+320, y+320)
PDF_curveto(p, x+240, y+240, x+240, y+80, x+320, y)
PDF_curveto(p, x+240, y+80, x+80, y+80, x, y)
PDF_stroke(p)
type,x,params = "radial",500,"r0=0 r1=220"
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 120+x, 340+y, 120+x, 340+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_fill(p)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_stroke(p)
type,x,params = "axial",600,""
y = 0
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 80+x, 80+y, 80+x, 640+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_fill(p)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+640)
PDF_lineto(p, x+160, y+640)
PDF_lineto(p, x+160, y+80)
PDF_lineto(p, x+80, y+80)
PDF_stroke(p)
type,x,params = "axial",50,""
y = 300
PDF_setcolor(p, "fill", "rgb", 0.0, 0.0, 0.4, 1.0)
shading = PDF_shading(p, type, 80+x, 80+y, 400+x, 80+y, 1.0, 1.0, 1.0, 1.0, params) #axial|radial
pattern = PDF_shading_pattern(p,shading,"")
PDF_setcolor(p, "fill", "pattern", pattern,0,0,0)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+160)
PDF_lineto(p, x+400, y+160)
PDF_lineto(p, x+400, y+80)
PDF_lineto(p, x+80, y+80)
PDF_fill(p)
PDF_moveto(p, x+80, y+80)
PDF_lineto(p, x+80, y+160)
PDF_lineto(p, x+400, y+160)
PDF_lineto(p, x+400, y+80)
PDF_lineto(p, x+80, y+80)
PDF_stroke(p)
PDF_end_page(p)
PDF_close(p)
PDF_delete(p);
|
obnam-mirror/obnam
|
obnamlib/app.py
|
Python
|
gpl-3.0
| 11,277
| 0
|
# Copyright (C) 2009-2017 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import socket
import sys
import time
import tracing
import cliapp
import larch
import paramiko
import ttystatus
import obnamlib
class ObnamIOError(obnamlib.ObnamError):
msg = 'I/O error: {filename}: {errno}: {strerror}'
class ObnamSSHError(obnamlib.ObnamError):
msg = 'SSH error: {msg}'
class ObnamSystemError(obnamlib.ObnamError):
msg = 'System error: {filename}: {errno}: {strerror}'
class App(cliapp.Application):
'''Main program for backup program.'''
def add_settings(self):
# General settings.
self.settings.string(
['repository', 'r'],
'name of backup repository (can be pathname or supported URL)',
metavar='URL')
self.settings.string(
['client-name'],
'name of client (defaults to hostname)',
default=self.deduce_client_name())
self.settings.boolean(
['quiet', 'silent'],
'be silent: show only error messages, no progress updates')
self.settings.boolean(
['verbose'],
'be verbose: tell the user more of what is going on and '
'generally make sure the user is aware of what is happening '
'or at least that something is happening and '
'also make sure their screen is getting updates frequently '
'and that there is changes happening all the time so they '
'do not get bored and that they in fact get frustrated by '
'getting distracted by so many updates that they will move '
'into the Gobi desert to live under a rock')
self.settings.boolean(
['pretend', 'dry-run', 'no-act'],
'do not actually change anything (works with '
'backup, forget and restore only, and may only '
'simulate approximately real behavior)')
self.settings.integer(
['lock-timeout'],
'when locking in the backup repository, '
'wait TIMEOUT seconds for an existing lock '
'to go away before giving up',
metavar='TIMEOUT',
default=60)
# Repository format selection.
self.settings.choice(
['repository-format'],
['6', obnamlib.GREEN_ALBATROSS_VERSION],
'use FORMAT for new repositories; '
'one of "6", "{}"'.format(obnamlib.GREEN_ALBATROSS_VERSION),
metavar='FORMAT')
algos = list(obnamlib.checksum_algorithms)
algos.remove('sha512') # We move this first in the list, default.
algos.remove('md5') # We're NOT letting the user choose MD5.
algos = ['sha512'] + algos
self.settings.choice(
['checksum-algorithm'],
algos,
'use CHECKSUM for checksum algorithm '
'(not for repository format 6); '
'one of: ' +
', '.join(algos),
metavar='CHECKSUM')
# Performance related settings.
perf_group = obnamlib.option_group['perf']
self.settings.bytesize(
['node-size'],
'size of B-tree nodes on disk; only affects new '
'B-trees so you may need to delete a client '
'or repository to change this for existing '
'repositories',
default=obnamlib.DEFAULT_NODE_SIZE,
group=perf_group)
self.settings.bytesize(
['chunk-size'],
'size of chunks of file data backed up',
default=obnamlib.DEFAULT_CHUNK_SIZE,
group=perf_group)
self.settings.bytesize(
['upload-queue-size'],
'length of upload queue for B-tree nodes',
default=obnamlib.DEFAULT_UPLOAD_QUEUE_SIZE,
group=perf_group)
self.settings.bytesize(
['lru-size'],
'size of LRU cache for B-tree nodes',
default=obnamlib.DEFAULT_LRU_SIZE,
group=perf_group)
self.settings.integer(
['idpath-depth'],
'depth of chunk id mapping',
default=obnamlib.IDPATH_DEPTH,
group=perf_group)
self.settings.integer(
['idpath-bits'],
'chunk id level size',
default=obnamlib.IDPATH_BITS,
group=perf_group)
self.settings.integer(
['idpath-skip'],
'chunk id mapping lowest bits skip',
default=obnamlib.IDPATH_SKIP,
group=perf_group)
# Settings to help developers and development of Obnam.
devel_group = obnamlib.option_group['devel']
self.settings.string_list(
['trace'],
            'add to filename patterns for which trace '
'debugging logging happens',
group=devel_group)
self.settings.string(
['pretend-time'],
'pretend it is TIMESTAMP (YYYY-MM-DD HH:MM:SS); '
'this is only useful for testing purposes',
metavar='TIMESTAMP',
group=devel_group)
self.settings.integer(
['crash-limit'],
'artificially crash the program after COUNTER '
'files written to the repository; this is '
'useful for crash testing the application, '
'and should not be enabled for real use; '
'set to 0 to disable (disabled by default)',
metavar='COUNTER',
group=devel_group)
# The following needs to be done here, because it needs
# to be done before option processing. This is a bit ugly,
# but the best we can do with the current cliapp structure.
# Possibly cliapp will provide a better hook for us to use
# later on, but this is reality now.
self.setup_ttystatus()
self.fsf = obnamlib.VfsFactory()
self.repo_factory = obnamlib.RepositoryFactory()
self.setup_hooks()
self.settings['log-level'] = 'info'
def deduce_client_name(self):
return socket.gethostname()
def setup_hooks(self):
self.hooks = obnamlib.HookManager()
self.hooks.new('config-loaded')
self.hooks.new('shutdown')
# The repository factory creates all repository related hooks.
self.repo_factory.setup_hooks(self.hooks)
def setup(self):
self.pluginmgr.plugin_arguments = (self,)
def process_args(self, args):
try:
try:
if self.settings['quiet']:
self.ts.disable()
                for pattern in self.settings['trace']:
                    tracing.trace_add_pattern(pattern)
                self.hooks.call('config-loaded')
cliapp.Application.process_args(self, args)
self.hooks.call('shutdown')
except paramiko.SSHException as e:
logging.critical(
                    'Caught SSHException: %s', str(e), exc_info=True)
raise ObnamSSHError(msg=str(e))
except IOError as e:
logging.critical('Caught IOError: %s', str(e), exc_info=True)
raise ObnamIOError(
errno=e.errno, strerror=e.strerror, filename=e.filename)
except OSError as e:
logging.critical('Caught OSError: %s', str(e), exc_info=True)
raise ObnamSystemError(
errno=e.errno, strerror=e.strerror, filename=e.filename)
except larch.Error as e:
|
garncarz/dns-server
|
dns/migrations/0003_redirection.py
|
Python
|
gpl-2.0
| 640
| 0.001563
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-10 22:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dns', '0002_auto_20151228_0134'),
]
operations = [
        migrations.CreateModel(
name='Redirection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abbr', models.CharField(max_length=100, unique=True)),
('target', models.URLField()),
],
),
]
|
JKlesmith/Bioinformatics
|
ProcessMSA.py
|
Python
|
bsd-3-clause
| 4,782
| 0.015056
|
#!/usr/bin/python
#Copyright (c) 2016, Justin R. Klesmith
#All rights reserved.
from __future__ import division
from math import log, sqrt, pow
import argparse, os, random
#Set the author information
__author__ = "Justin R. Klesmith"
__copyright__ = "Copyright 2016, Justin R. Klesmith"
__credits__ = ["Justin R. Klesmith", "Timothy A. Whitehead"]
__license__ = "BSD-3"
__version__ = "X.X, Build: 2016XXXX"
__maintainer__ = "Justin R. Klesmith"
__email__ = ["klesmit3@msu.edu", "justinklesmith@gmail.com", "justinklesmith@evodyn.com"]
#Get commandline arguments
parser = argparse.ArgumentParser(description='Process the MSA to get into a format for PSI-Blast')
parser.add_argument('-m', dest='msa', action='store', required=True, help='MSA file path')
parser.add_argument('-l', dest='length', action='store', required=True, help='Length of protein')
#parser.add_argument('-d', dest='dssp', action='store', required=True, help='Path to processed DSSP output')
args = parser.parse_args()
#Populate array
Mutations = {}
for j in xrange(1,int(args.length)):
#Mutations[j] = False
Mutations[j] = None
#Import DSSP Information from CSV
#if os.path.isfile(args.dssp):
# with open(args.dssp, 'r') as infile: #Open the file with the wild-type protein sequence
# for line in infile:
# split = line.split(",")
# if split[0] != "ID": #Skip the CSV header
# location = int(split[0])
# ss = str(split[1]).rstrip("\n\r")
#
# if len(ss) == 0:
# Mutations[location] = "L"
# else:
# Mutations[location] = ss
#If loop then set true
#if len(ss) == 0 or ss == "S" or ss == "T":
#Mutations[location] = True
#else:
# print "Cannot open the processed DSSP"
# quit()
#Import msa alignment
Alignment = ""
outfile = open('msatemp.csv', 'w')
if os.path.isfile(args.msa):
with open(args.msa, 'r') as infile: #Open the file with the wild-type protein sequence
Output = ""
for line in infile:
#Check to see if we have a header
if line[0] == ">":
#print Output #Print the current alignment
Alignment = Alignment + Output + "\n"
Output = "" #Empty the current alignment
Output = Output + line.rstrip('\n') + "," #Assemble the line
else:
Output = Output + line.rstrip('\n') #Assemble the line
else:
print "Cannot open the processed NCBI CSV"
quit()
outfile.write(Alignment)
outfile.close()
#Import MSA into a lookup table
MSATable = {}
outfile = open('msatemp2.csv', 'w')
with open('msatemp.csv', 'r') as infile: #Open the file with the wild-type protein sequence
for line in infile:
split = line.split(",")
if len(line) > 10:
MSATable.update({split[0] : split[1].rstrip("\n")})
outfile.write(split[1])
outfile.close()
#Make a DSSP lookup string
Wildtype = MSATable[">ENTER YOUR WILD-TYPE SEQUENCE HEADER NAME HERE found in the MSA or CDHIT Cluster"]
MSAWTLen = len(Wildtype)
#CorrectedDSSP = ""
#DSSPCount = 1
#print Wildtype
#DSSP = ""
#for j in xrange(1,int(args.length)):
#Mutations[j] = False
#DSSP = DSSP + Mutations[j].rstrip("\n\r")
#print DSSP
#for j in xrange(0,MSAWTLen):
# if Wildtype[j] == "-":
# CorrectedDSSP = CorrectedDSSP + "-"
# else:
# CorrectedDSSP = CorrectedDSSP + Mutations[DSSPCount]
# DSSPCount = DSSPCount + 1
#Add the lookup string to the 2nd temp table
#with open('msatemp2.csv', 'r+') as f:
# content = f.read()
# f.seek(0, 0)
# f.write(CorrectedDSSP + '\n' + Wildtype + '\n\n' + content)
#Time to mark the insertions
XedOut = ""
outfile2 = open('msatemp3.csv', 'w')
Wildtype = Wildtype + "\n"
MSAWTLen = len(Wildtype)
with open('msatemp2.csv', 'r') as f:
for line in f:
for i in xrange(0,MSAWTLen):
if Wildtype[i] == "-":
XedOut = XedOut + "X"
else:
XedOut = XedOut + line[i]
outfile2.write(XedOut)
outfile2.close()
#Now let's delete the insertions
output = ""
with open('msatemp3.csv', 'r') as f1:
for line in f1:
Len = len(line)
for i in xrange(0, Len):
if line[i] != "X":
output = output + line[i]
f1o = open('msatemp4.csv', 'w')
f1o.write(output)
f1o.close()
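#Toy walk-through of the two passes above (made-up alignment, not real data):
#with wild-type row "AC-GT", an aligned row "ACTGT" is first marked "ACXGT",
#and the deletion pass then drops every "X", leaving "ACGT". Columns where the
#wild type has "-" are insertions relative to it and are removed.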
#Make the sixth file: wrap each sequence from msatemp5.csv (those that lacked the length and lgk3) in a FASTA record with a random header
output = ""
with open('msatemp5.csv', 'r') as f2:
for line in f2:
output = output + ">" + str(random.random()) + "\n" + line + "\n"
f1o = open('msatemp6.csv', 'w')
f1o.write(output)
f1o.close()
|
pchickey/paparazzi-linux-release
|
sw/tools/calibration/calib.py
|
Python
|
gpl-2.0
| 3,989
| 0.018802
|
#
# My first attempt at python
# calibrate accelerometer
#
import re
import scipy
from scipy import optimize
from scipy import linalg
from pylab import *
#
# parse the log
#
def read_log(ac_id, filename, sensor):
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while 1:
line = f.readline().strip()
if line == '':
break
m=re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return scipy.array(list_meas)
#
# select only non-noisy data
#
def filter_meas(meas, window_size, noise_threshold):
filtered_meas = []
filtered_idx = []
for i in range(window_size,len(meas)-window_size):
noise = meas[i-window_size:i+window_size,:].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i,:])
filtered_idx.append(i)
return scipy.array(filtered_meas), filtered_idx
#
# initial boundary based calibration
#
def get_min_max_guess(meas, scale):
max_meas = meas[:,:].max(axis=0)
min_meas = meas[:,:].min(axis=0)
n = (max_meas + min_meas) / 2
sf = 2*scale/(max_meas - min_meas)
return scipy.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
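#
# Worked example for the boundary guess above (made-up numbers): if one axis
# spans raw counts -520..480 with scale 9.81, the neutral guess is
# (480 + -520)/2 = -20 and the scale-factor guess 2*9.81/(480 - -520) ~ 0.0196
#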
#
# scale the set of measurements
#
def scale_measurements(meas, p):
l_comp = [];
l_norm = [];
for m in meas[:,]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return scipy.array(l_comp), scipy.array(l_norm)
#
# print xml for airframe file
#
def print_xml(p, sensor, res):
print ""
print "<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>"
print "<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>"
print "<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>"
print "<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>"
print "<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>"
print "<define name=\""+sensor+"_Z_SENS\" value=\""+str(p
|
[5]*2**res)+"\" integer=\"16\"/>"
filename = 'log_accel_booz2_a2'
ac_id = "151"
if 1:
sensor = "ACCEL"
sensor_ref = 9.81
sensor_res = 10
noise_window = 20;
noise_threshold = 40;
else:
sensor = "MAG"
sensor_ref = 1.
sensor_res = 11
noise_window = 10;
noise_threshold = 1000;
print "reading file "+filename+" for aircraft "+ac_id+" and sensor "+sensor
measurements = read_log(ac_id, filename, sensor)
print "fou
|
nd "+str(len(measurements))+" records"
flt_meas, flt_idx = filter_meas(measurements, noise_window, noise_threshold)
print "remaining "+str(len(flt_meas))+" after low pass"
p0 = get_min_max_guess(flt_meas, sensor_ref)
cp0, np0 = scale_measurements(flt_meas, p0)
print "initial guess : "+str(np0.mean())+" "+str(np0.std())
print p0
def err_func(p,meas,y):
cp, np = scale_measurements(meas, p)
err = y*scipy.ones(len(meas)) - np
return err
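# err_func drives each scaled norm toward sensor_ref: after subtracting the
# neutral offsets p[0:3] and applying the per-axis scale factors p[3:6],
# every filtered measurement should land on the sphere of radius 9.81 m/s^2
# (for the accelerometer case), which is what leastsq minimizes below.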
p1, success = optimize.leastsq(err_func, p0[:], args=(flt_meas, sensor_ref))
cp1, np1 = scale_measurements(flt_meas, p1)
print "optimized guess : "+str(np1.mean())+" "+str(np1.std())
print p1
print_xml(p1, sensor, sensor_res)
subplot(3,1,1)
plot(measurements[:,0])
plot(measurements[:,1])
plot(measurements[:,2])
plot(flt_idx, flt_meas[:,0], 'ro')
plot(flt_idx, flt_meas[:,1], 'ro')
plot(flt_idx, flt_meas[:,2], 'ro')
subplot(3,2,3)
plot(cp0[:,0]);
plot(cp0[:,1]);
plot(cp0[:,2]);
plot(-sensor_ref*scipy.ones(len(flt_meas)));
plot(sensor_ref*scipy.ones(len(flt_meas)));
subplot(3,2,4)
plot(np0);
plot(sensor_ref*scipy.ones(len(flt_meas)));
subplot(3,2,5)
plot(cp1[:,0]);
plot(cp1[:,1]);
plot(cp1[:,2]);
plot(-sensor_ref*scipy.ones(len(flt_meas)));
plot(sensor_ref*scipy.ones(len(flt_meas)));
subplot(3,2,6)
plot(np1);
plot(sensor_ref*scipy.ones(len(flt_meas)));
show();
|
ttn-be/ttnmapper
|
config.py
|
Python
|
mit
| 1,297
| 0.010023
|
from network import WLAN
###############################################################################
# Settings for WLAN STA mode
###############################################################################
WLAN_MODE = 'off'
#WLAN_SSID = ''
#WLAN_AUTH = (WLAN.WPA2,'')
###############################################################################
# LoRaWAN Configuration
###############################################################################
# May be either 'otaa', 'abp', or 'off'
LORA_MODE = 'otaa'
# Settings for mode 'otaa'
LORA_OTAA_EUI = '70B3D57EF0001ED4'
LORA_OTAA_KEY = None # See README.md for instructions!
# Settings for mode 'abp'
#LORA_ABP_DEVADDR = ''
#LORA_ABP_NETKEY = ''
#LORA_ABP_APPKEY = ''
# Interval between measures transmitted to TTN.
# Measured airtime of transmission is 56.6 ms, fair use policy limits us to
# 30 seconds per day (= roughly 500 messages). We default to a 180 second
# interval (=480 messages / day).
LORA_SEND_RATE = 180
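# Sanity check of the fair-use arithmetic above (figures taken from the
# comment, not re-measured): 30 s / 0.0566 s is roughly 530 permitted
# messages per day, while 86400 s / 180 s = 480 messages actually sent.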
###############################################################################
# GNSS Configuration
###############################################################################
GNSS_UART_PORT = 1
GNSS_UART_BAUD = 9600
GNSS_ENABLE_PIN = 'P8'
|
chubbymaggie/reverse
|
plasma/lib/__init__.py
|
Python
|
gpl-3.0
| 12,594
| 0.005717
|
#!/usr/bin/env python3
#
# PLASMA : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from argparse import ArgumentParser
import plasma
import plasma.lib.utils
import plasma.lib.colors
from plasma.lib.database import Database
from plasma.lib.disassembler import Disassembler, NB_LINES_TO_DISASM
from plasma.lib.utils import die, error, debug__
from plasma.lib.generate_ast import generate_ast
from plasma.lib.exceptions import ExcArch, ExcFileFormat, ExcIfelse, ExcPEFail
#
# The global context variable is always named as gctx
#
class GlobalContext():
def __init__(self):
# TODO : let globally ?
plasma.lib.utils.gctx = self
plasma.lib.colors.gctx = self
self.comments = True # always True, will be removed
# For info() messages
self.quiet = False
self.is_interactive = False
# Command line options
self.print_andif = True
self.color = True
self.max_data_size = 30
self.filename = None
self.syms = False
self.calls_in_section = None
self.entry = None # string : symbol | EP | 0xNNNN
self.do_dump = False
self.vim = False
self.nb_lines = 30
self.graph = False # Print graph != gph -> object
self.interactive_mode = False
self.debug = False
self.raw_base = 0
self.raw_big_endian = False
self.list_sections = False
self.print_bytes = False
self.raw_type = None
self.print_data = False
self.capstone_string = 0 # See lib.ui.visual.main_cmd_inst_output
self.show_mangling = True
self.autoanalyzer = True
self.debugsp = False
# Built objects
self.dis = None # Disassembler
self.libarch = None # module lib.arch.<BIN_ARCH>
self.db = None # Database
self.api = None # Api
def parse_args(self):
parser = ArgumentParser(description=
'Reverse engineering for x86/ARM/MIPS binaries. Generation of pseudo-C. '
'Supported formats : ELF, PE. More commands available in the interactive'
' mode. https://github.com/joelpx/plasma')
parser.add_argument('filename', nargs='?', metavar='FILENAME')
parser.add_argument('-nc', '--nocolor', action='store_true')
parser.add_argument('-g', '--graph', action='store_true',
help='Generate a file graph.dot.')
parser.add_argument('--noandif', action='store_true',
help="Print normal 'if' instead of 'andif'")
parser.add_argument('--datasize', type=int, default=30, metavar='N',
            help='default 30, maximum of chars to display for strings or bytes array.')
parser.add_argument('-x', '--entry', metavar='SYMBOLNAME|0xXXXXX|EP',
help='Pseudo-decompilation, default is main. EP stands for entry point.')
parser.add_argument('--vim', action='store_true',
help='Generate syntax colors for vim')
parser.add_argument('-s', '--symbols', action='store_true',
help='Print all symbols')
        parser.add_argument('--sections', action='store_true',
help='Print all sections')
parser.add_argument('--dump', action='store_true',
help='Dump asm without decompilation')
parser.add_argument('-l', '--lines', type=int, default=30, metavar='N',
help='Max lines used with --dump')
parser.add_argument('--nbytes', type=int, default=0, metavar='N',
help='Print n bytes.')
parser.add_argument('-i', '--interactive', action='store_true',
help='Interactive mode')
parser.add_argument('-d', '--opt_debug', action='store_true')
parser.add_argument('--raw', metavar='x86|x64|arm|mips|mips64',
help='Consider the input file as a raw binary')
parser.add_argument('--rawbase', metavar='0xXXXXX',
help='Set base address of a raw file (default=0)')
parser.add_argument('--rawbe', action='store_true',
help='If not set it\'s in little endian')
parser.add_argument('-na', '--noautoanalyzer', action='store_true',
            help='Disable analysis on the entry point / symbols and don\'t scan memory. You can force it with the command push_analyze_symbols.')
parser.add_argument('--debugsp', action='store_true',
help="Print the stack offset on each instructions. Warning: these values will not be saved in the database.")
args = parser.parse_args()
self.debug = args.opt_debug
self.print_andif = not args.noandif
self.color = not args.nocolor
self.max_data_size = args.datasize
self.filename = args.filename
self.raw_type = args.raw
self.raw_base = args.rawbase
self.syms = args.symbols
self.entry = args.entry
self.do_dump = args.dump
self.vim = args.vim
self.interactive_mode = args.interactive
self.nb_lines = args.lines
self.graph = args.graph
self.raw_big_endian = args.rawbe
self.list_sections = args.sections
self.autoanalyzer = not args.noautoanalyzer
self.debugsp = args.debugsp
if args.nbytes == 0:
self.nbytes = 4
self.print_bytes = False
else:
self.nbytes = int(args.nbytes)
self.print_bytes = True
if self.raw_base is not None:
try:
self.raw_base = int(self.raw_base, 16)
except:
error("--rawbase must be in hex format")
die()
else:
self.raw_base = 0
def load_file(self, filename=None):
if filename is None:
filename = self.filename
if not os.path.exists(filename):
error("file {self.filename} doesn't exist".format(self=self))
if self.interactive_mode:
return False
die()
if not os.path.isfile(filename):
error("this is not a file".format(self=self))
if self.interactive_mode:
return False
die()
self.db = Database()
self.db.load(filename)
if self.raw_base != 0:
self.db.raw_base = self.raw_base
if self.raw_type is not None:
self.db.raw_type = self.raw_type
if self.raw_big_endian is not None:
self.db.raw_is_big_endian = self.raw_big_endian
if self.db.loaded:
self.raw_base = self.db.raw_base
self.raw_type = self.db.raw_type
self.raw_big_endian = self.db.raw_is_big_endian
try:
dis = Disassembler(filename, self.raw_type,
self.raw_base, self.raw_big_endian,
self.db)
except ExcArch as e:
error("arch %s is not supported" % e.arch)
if self.interactive_mode:
return False
die()
except ExcFileFormat:
error("the file is not PE or ELF binary")
if self.interactive_mode:
return False
die()
except ExcPEFail as e:
error(str(e.e))
error("it seems that there is a random bug in pefile, you shoul retry.
|
vgrem/Office365-REST-Python-Client
|
office365/outlook/calendar/calendar_permission.py
|
Python
|
mit
| 1,204
| 0.005814
|
from office365.entity import Entity
from office365.outlook.calendar.email_address import EmailAddress
class CalendarPermission(Entity):
"""
The permissions of a user with whom the calendar has been shared or delegated in an Outlook client.
    Get, update, and delete of calendar permissions is supported on behalf of only the calendar owner.
Getting the calendar permissions of a calendar on behalf of a sharee or delegate returns
an empty calendar permissions collection.
Once a sharee or delegate has been set up for a calendar, you can update only the role property to change
    the permissions of a sharee or delegate. You cannot update the allowedRoles, emailAddress, isInsideOrganization,
or isRemovable property. To change these properties, you should delete the corresponding calendarPermission
object and create another sharee or delegate in an Outlook client.
"""
@property
def email_address(self):
"""
Represents a sharee or delegate who has access to the calendar.
For the "My Organization" sharee, the address property is null. Read-only.
"""
return self.properties.get("emailAddress", EmailAddress())
|
pepeportela/edx-platform
|
common/lib/xmodule/xmodule/word_cloud_module.py
|
Python
|
agpl-3.0
| 8,926
| 0.001232
|
"""Word cloud is ungraded xblock used by students to
generate and view word cloud.
On the client side we show:
If student does not yet answered - `num_inputs` numbers of text inputs.
If student have answered - words he entered and cloud.
"""
import json
import logging
from pkg_resources import resource_string
from xblock.fields import Boolean, Dict, Integer, List, Scope, String
from xblock.fragment import Fragment
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.x_module import XModule
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
def pretty_bool(value):
"""Check value for possible `True` value.
    Using this function we can manage different types of Boolean values
in xml files.
"""
bool_dict = [True, "True", "true", "T", "t", "1"]
return value in bool_dict
class WordCloudFields(object):
"""XFields for word cloud."""
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
scope=Scope.settings,
default="Word cloud"
)
instructions = String(
display_name=_("Instructions"),
help=_("Add instructions to help learners understand how to use the word cloud. Clear instructions are important, especially for learners who have accessibility requirements."), # nopep8 pylint: disable=C0301
scope=Scope.settings,
)
num_inputs = Integer(
display_name=_("Inputs"),
help=_("The number of text boxes available for learners to add words and sentences."),
scope=Scope.settings,
default=5,
values={"min": 1}
)
num_top_words = Integer(
display_name=_("Maximum Words"),
help=_("The maximum number of words displayed in the generated word cloud."),
scope=Scope.settings,
default=250,
values={"min": 1}
)
display_student_percents = Boolean(
display_name=_("Show Percents"),
help=_("Statistics are shown for entered words near that word."),
scope=Scope.settings,
default=True
)
# Fields for descriptor.
submitted = Boolean(
help=_("Whether this learner has posted words to the cloud."),
scope=Scope.user_state,
default=False
)
student_words = List(
help=_("Student answer."),
scope=Scope.user_state,
default=[]
)
all_words = Dict(
help=_("All possible words from all learners."),
scope=Scope.user_state_summary
)
top_words = Dict(
help=_("Top num_top_words words for word cloud."),
scope=Scope.user_state_summary
)
class WordCloudModule(WordCloudFields, XModule):
"""WordCloud Xmodule"""
js = {
'js': [
resource_string(__name__, 'js/src/javascript_loader.js'),
],
}
css = {'scss': [resource_string(__name__, 'css/word_cloud/display.scss')]}
js_module_name = "WordCloud"
def get_state(self):
"""Return success json answer for client."""
if self.submitted:
total_count = sum(self.all_words.itervalues())
return json.dumps({
'status': 'success',
'submitted': True,
'display_student_percents': pretty_bool(
self.display_student_percents
),
'student_words': {
word: self.all_words[word] for word in self.student_words
},
'total_count': total_count,
'top_words': self.prepare_words(self.top_words, total_count)
})
else:
return json.dumps({
'status': 'success',
'submitted': False,
'display_student_percents': False,
'student_words': {},
'total_count': 0,
'top_words': {}
})
def good_word(self, word):
"""Convert raw word to suitable word."""
return word.strip().lower()
def prepare_words(self, top_words, total_count):
"""Convert words dictionary for client API.
:param top_words: Top words dictionary
:type top_words: dict
:param total_count: Total number of words
:type total_count: int
        :rtype: list of dicts. Every dict has 3 keys: text - the actual word,
        size - count of the word, percent - percent within the top_words dataset.
        Calculates corrected percents for every top word:
        For every word except the last, it calculates the rounded percent.
        For the last word, the percent is 100 minus the sum of all other percents.
"""
list_to_return = []
percents = 0
for num, word_tuple in enumerate(top_words.iteritems()):
if num == len(top_words) - 1:
percent = 100 - percents
else:
percent = round(100.0 * word_tuple[1] / total_count)
percents += percent
list_to_return.append(
{
'text': word_tuple[0],
'size': word_tuple[1],
'percent': percent
}
)
return list_to_return
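    # Editor's worked example of the percent correction above: with
    # top_words={'a': 1, 'b': 1, 'c': 1} and total_count=3, the first two
    # words round to 33 each, so the last reports 100 - 66 = 34 and the
    # percents still sum to exactly 100.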
def top_dict(self, dict_obj, amount):
"""Return top words from all words, filtered by n
|
umber of
occurences
:param dict_obj: all words
:type dict_obj: dict
:param amount: number of words to be in top dict
:type amount: int
:rtype: dict
"""
return dict(
            sorted(
dict_obj.items(),
key=lambda x: x[1],
reverse=True
)[:amount]
)
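    # Editor's illustration: top_dict({'a': 3, 'b': 1, 'c': 2}, 2) keeps the
    # two most frequent entries, i.e. {'a': 3, 'c': 2}.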
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request get parameters
Returns:
json string
"""
if dispatch == 'submit':
if self.submitted:
return json.dumps({
'status': 'fail',
'error': 'You have already posted your data.'
})
# Student words from client.
# FIXME: we must use raw JSON, not a post data (multipart/form-data)
raw_student_words = data.getall('student_words[]')
student_words = filter(None, map(self.good_word, raw_student_words))
self.student_words = student_words
# FIXME: fix this, when xblock will support mutable types.
# Now we use this hack.
# speed issues
temp_all_words = self.all_words
self.submitted = True
# Save in all_words.
for word in self.student_words:
temp_all_words[word] = temp_all_words.get(word, 0) + 1
# Update top_words.
self.top_words = self.top_dict(
temp_all_words,
self.num_top_words
)
# Save all_words in database.
self.all_words = temp_all_words
return self.get_state()
elif dispatch == 'get_state':
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
def student_view(self, context):
"""
Renders the output that a student will see.
"""
fragment = Fragment()
fragment.add_content(self.system.render_template('word_cloud.html', {
'ajax_url': self.system.ajax_url,
'display_name': self.display_name,
'instructions': self.instructions,
'element_class': self.location.category,
'element_id': self.location.html_id(),
'num_inputs': self.num_inputs,
'submitted': self.submitted,
}))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/d3.min.js'))
|
anurag03/integration_tests
|
cfme/base/credential.py
|
Python
|
gpl-2.0
| 7,024
| 0.000712
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from cfme.utils import conf
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
class FromConfigMixin(object):
@staticmethod
def rename_properties(creds):
"""
helper function to make properties have same names in credential objects.
Args:
creds: dict
Returns: updated dict
"""
creds = deepcopy(creds)
to_rename = [('password', 'secret'), ('username', 'principal')]
for key1, key2 in to_rename:
if key1 in creds:
creds[key2] = creds[key1]
del creds[key1]
return creds
@classmethod
def from_config(cls, key):
"""
helper function which allows to construct credential object from credentials.eyaml
Args:
key: credential key
        Returns: credential object
"""
creds = cls.rename_properties(conf.credentials[key])
return cls(**creds)
@classmethod
def from_plaintext(cls, creds):
"""
helper function which allows to construct credential class from plaintext dict
Args:
creds: dict
Returns: credential object
"""
creds = cls.rename_properties(creds)
return cls(**creds)
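# Editor's sketch (illustrative values): from_plaintext maps plaintext keys
# onto the constructor kwargs used below, e.g.
#   cred = Credential.from_plaintext({'username': 'admin', 'password': 'smartvm'})
#   cred.principal == 'admin' and cred.secret == 'smartvm'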
class Credential(Pretty, Updateable, FromConfigMixin):
"""
A class to fill in credentials
Args:
principal: user name
secret: password
verify_secret: password
domain: concatenated with principal if defined
"""
pretty_attrs = ['principal', 'secret']
def __init__(self, principal, secret, verify_secret=None, domain=None,
tenant_id=None, subscription_id=None, **ignore):
self.principal = principal
self.secret = secret
self.verify_secret = verify_secret
self.domain = domain
self.tenant_id = tenant_id
self.subscription_id = subscription_id
def __getattribute__(self, attr):
if attr == 'verify_secret':
if object.__getattribute__(self, 'verify_secret') is None:
return object.__getattribute__(self, 'secret')
else:
return object.__getattribute__(self, 'verify_secret')
elif attr == 'principal':
domain = object.__getattribute__(self, 'domain')
principal = object.__getattribute__(self, 'principal')
return r'{}\{}'.format(domain, principal) if domain else principal
else:
return super(Credential, self).__getattribute__(attr)
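    # Editor's note: __getattribute__ above composes a down-level logon name
    # when a domain is set, e.g. (illustrative values):
    #   Credential('jdoe', 'pw', domain='CORP').principal == r'CORP\jdoe'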
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'username': self.principal,
'password': self.secret,
'confirm_password': None
}
def __eq__(self, other):
if other is None:
return False
return self.principal == other.principal and self.secret == other.secret and \
self.verify_secret == other.verify_secret
def __ne__(self, other):
return not self.__eq__(other)
class EventsCredential(Credential):
pass
class CANDUCredential(Credential):
pass
class AzureCredential(Credential):
pass
class SSHCredential(Credential):
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'username': self.principal,
'private_key': self.secret,
}
class TokenCredential(Pretty, Updateable, FromConfigMixin):
"""
A class to fill in credentials
Args:
token: identification token
verify_token: token once more
"""
pretty_attrs = ['token']
def __init__(self, token, verify_token=None, **kwargs):
self.token = token
self.verify_token = verify_token
for name, value in kwargs.items():
setattr(self, name, value)
def __getattribute__(self, attr):
if attr == 'verify_token':
if object.__getattribute__(self, 'verify_token') is not None:
return object.__getattribute__(self, 'verify_token')
else:
return object.__getattribute__(self, 'token')
else:
return super(TokenCredential, self).__getattribute__(attr)
def __eq__(self, other):
return self.token == other.token and self.verify_token == other.verify_token
def __ne__(self, other):
return not self.__eq__(other)
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'token': self.token,
'verify_token': None
}
class ServiceAccountCredential(Pretty, Updateable):
"""
A class to fill in credentials
Args:
service_account: service account string
"""
pretty_attrs = ['service_account']
def __init__(self, service_account):
        super(ServiceAccountCredential, self).__init__()
self.service_account = service_account
@property
def view_value_mapping(self):
"""
used for filling forms like add/edit provider form
Returns: dict
"""
return {
'service_account': self.service_account
}
def __eq__(self, other):
return self.service_account == other.service_account
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def from_config(cls, key):
# TODO: refactor this. consider json.dumps
creds = deepcopy(conf.credentials[key])
service_data = creds['service_account']
service_account = '''
"type": "{type}",
"project_id": "{project}",
"private_key_id": "{private_key_id}",
"private_key": "{private_key}",
"client_email": "{email}",
"client_id": "{client}",
"auth_uri": "{auth}",
"token_uri": "{token}",
"auth_provider_x509_cert_url": "{auth_provider}",
"client_x509_cert_url": "{cert_url}"
'''.format(
type=service_data.get('type'),
project=service_data.get('project_id'),
private_key_id=service_data.get('private_key_id'),
private_key=service_data.get('private_key').replace('\n', '\\n'),
email=service_data.get('client_email'),
client=service_data.get('client_id'),
auth=service_data.get('auth_uri'),
token=service_data.get('token_uri'),
auth_provider=service_data.get('auth_provider_x509_cert_url'),
cert_url=service_data.get('client_x509_cert_url'))
service_account = '{' + service_account + '}'
return cls(service_account=service_account)
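        # Editor's sketch for the TODO above (an assumption, not the original
        # implementation): json.dumps could build the same payload without the
        # manual template, given the same credential keys, e.g.:
        #   import json
        #   service_account = json.dumps(
        #       {k: service_data.get(k) for k in ('type', 'project_id', ...)})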
|
mozilla/make.mozilla.org
|
make_mozilla/pages/models.py
|
Python
|
bsd-3-clause
| 5,208
| 0.003072
|
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
from make_mozilla.core import fields
class Page(models.Model):
title = models.CharField(max_length=255)
path = models.SlugField()
real_path = models.CharField(max_length=1024, unique=True, blank=True)
parent = models.ForeignKey('self', blank=True, null=True,
help_text='This will allow you to use URLs like /about/foo - parent.path + path',
related_name='children')
show_subnav = models.BooleanField(default=False,
verbose_name='Show sub-navigation menu')
subnav_title = models.CharField(max_length=100, blank=True, null=True,
verbose_name='Menu title', help_text='This can be left blank if you do not need a title')
additional_content = models.TextField(blank=True, null=True)
def has_ancestor(self, page):
if not self.parent:
return False
if self.parent.id == page.id:
return True
return self.parent.has_ancestor(page)
def get_section_root(self):
return self.real_path.split('/')[0]
def clean(self):
self.path = self.path.strip('/')
if self.parent:
if self.parent.has_ancestor(self):
raise ValidationError('Cannot set page parent to one of its descendants')
self.real_path = '%s/%s' % (self.parent.real_path, self.path)
else:
self.real_path = self.path
try:
if Page.objects.exclude(id__exact=self.id).get(real_path=self.real_path):
raise ValidationError('This path/parent combination already exists.')
except Page.DoesNotExist:
# We can safely ignore this, as it means we're in the clear and our path is fine
pass
def save(self, *args, **kwargs):
super(Page, self).save(*args, **kwargs)
# Now we tell our children to update their real paths
# This will happen recursively, so we don't need to worry about that logic here
for child in self.children.all():
child.real_path = '%s/%s' % (self.real_path, child.path)
child.save()
def __unicode__(self):
return self.title
@property
def indented_title(self):
indent = len(self.real_path.split('/')) - 1
if not indent:
return self.title
return '%s %s' % ('-' * indent, self.title)
def get_absolute_url(self):
return reverse('page', args=[self.real_path])
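# Editor's illustration of the path logic above: a page with path='team'
# whose parent has real_path='about' gets real_path='about/team' in clean(),
# and get_absolute_url() reverses the 'page' URL name with that full path.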
class PageSection(models.Model):
title = models.CharField(max_length=255)
subnav_title = models.CharField(max_length=255, blank=True, null=True,
verbose_name='Sub-navigation title', help_text='Will use the section title if blank')
page = models.ForeignKey('Page', related_name='sections')
poster = fields.SizedImageField(
blank=True,
null=True,
upload_to='pages',
storage=FileSystemStorage(**settings.UPLOADED_IMAGES),
sizes={
'standard': 900,
'tablet': 700,
'handheld': 500,
})
content = models.TextField()
    sidebar = models.TextField(blank=True, null=True)
quotes = models.ManyToManyField('Quote', blank=True, null=True)
class Meta:
verbose_name = 'section'
ordering = ['id']
def __unicode__(self):
return mark_safe(self.title)
@property
def nav_title(self):
if self.subnav_title:
return mark_safe(self.subnav_title)
return unicode(self)
@property
def has_sidebar(self):
return self.sidebar or self.quotes.count()
class Quote(models.Model):
quote = models.CharField(max_length=1000)
source = models.ForeignKey('QuoteSource', blank=True, null=True)
url = models.URLField(blank=True, null=True, verbose_name='URL')
show_source_image = models.BooleanField(default=False, help_text='Show the source\'s image next to this quote, if available')
@property
def clean_quote(self):
return strip_tags(self.quote)
def __unicode__(self):
quote = self.clean_quote
if len(quote) > 25:
quote = quote[:25] + '...'
if not self.source:
return quote
return '%s (%s)' % (quote, self.source.name)
class QuoteSource(models.Model):
name = models.CharField(max_length=255)
strapline = models.CharField(max_length=255, blank=True, null=True, help_text='"Teacher", "CEO, MegaCorp", ...')
url = models.URLField(blank=True, null=True, verbose_name='URL')
avatar = fields.SizedImageField(
blank=True,
null=True,
verbose_name='Image',
upload_to='avatars',
storage=FileSystemStorage(**settings.UPLOADED_IMAGES),
sizes={
'adjusted': (90,90),
})
class Meta:
verbose_name = 'source'
def __unicode__(self):
if self.strapline:
return '%s - %s' % (self.name, self.strapline)
return self.name
|
Azure/azure-sdk-for-python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/aio/operations/_diagnostic_settings_operations.py
|
Python
|
mit
| 12,838
| 0.004674
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DiagnosticSettingsOperations:
"""DiagnosticSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2017_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_uri: str,
name: str,
**kwargs: Any
) -> "_models.DiagnosticSettingsResource":
"""Gets the active diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-05-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
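    # Editor's sketch (assumed client wiring): with an async
    # MonitorManagementClient exposing this operations group, a call looks like
    #   setting = await client.diagnostic_settings.get(
    #       resource_uri="/subscriptions/{sub}/resourceGroups/{rg}/...",
    #       name="mysetting")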
async def create_or_update(
self,
resource_uri: str,
name: str,
parameters: "_models.DiagnosticSettingsResource",
**kwargs: Any
) -> "_models.DiagnosticSettingsResource":
"""Creates or updates diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param name: The name of the diagnostic setting.
:type name: str
:param parameters: Parameters supplied to the operation.
:type parameters: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiagnosticSettingsResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiagnosticSettingsResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-05-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DiagnosticSettingsResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiagnosticSettingsResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/diagnosticSettings/{name}'} # type: ignore
async def delete(
self,
resource_uri: str,
name: str,
**kwargs: Any
) -> None:
"""Deletes existing diagnostic settings for the specified resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
        :param resource_uri: The identifier of the resource.
|
uclouvain/osis_louvain
|
assessments/tests/functionals/test_score_encoding.py
|
Python
|
agpl-3.0
| 57,982
| 0.004933
|
import datetime
import functools
import os
import random
import shutil
import tempfile
import time
from urllib import request
import faker
import magic
import pendulum
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import tag
from django.urls import reverse
from django.utils import timezone
from openpyxl import load_workbook
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from attribution.tests.factories.attribution import AttributionFactory
from base.tests.factories.academic_calendar import (AcademicCalendarExamSubmissionFactory,
AcademicCalendarFactory)
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.exam_enrollment import ExamEnrollmentFactory
from base.tests.factories.learning_unit import LearningUnitFactory
from base.tests.factories.learning_unit_enrollment import \
LearningUnitEnrollmentFactory
from base.tests.factories.learning_unit_year import LearningUnitYearFactory
from base.tests.factories.offer_enrollment import OfferEnrollmentFactory
from base.tests.factories.offer_year import OfferYearFactory
from base.tests.factories.offer_year_calendar import OfferYearCalendarFactory
from base.tests.factories.person import PersonFactory
from base.tests.factories.program_manager import ProgramManagerFactory
from base.tests.factories.session_exam_calendar import \
SessionExamCalendarFactory
from base.tests.factories.session_examen import SessionExamFactory
from base.tests.factories.student import StudentFactory
from base.tests.factories.tutor import TutorFactory
from base.tests.factories.user import SuperUserFactory, UserFactory
class BusinessMixin:
def create_user(self):
return UserFactory()
def create_super_user(self):
return SuperUserFactory()
def add_group(self, user, *group_names):
for name in group_names:
group, created = Group.objects.get_or_create(name=name)
group.user_set.add(user)
def add_permission(self, user, *permission_names):
for permission_name in permission_names:
if '.' in permission_name:
label, codename = permission_name.split('.')
permission = Permission.objects.get(codename=codename, content_type__app_label=label)
else:
permission = Permission.objects.get(codename=permission_name)
user.user_permissions.add(permission)
@tag('selenium')
class SeleniumTestCase(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.sel_settings = settings.SELENIUM_SETTINGS
print("### Virtual Display : {}".format(cls.sel_settings.get('VIRTUAL_DISPLAY')))
cls.screen_size = (cls.sel_settings.get('SCREEN_WIDTH'), cls.sel_settings.get('SCREEN_HIGH'))
cls.full_path_temp_dir = tempfile.mkdtemp('osis-selenium')
if cls.sel_settings.get('WEB_BROWSER').upper() == 'FIREFOX':
fp = webdriver.FirefoxProfile()
fp.set_preference('browser.download.dir', cls.full_path_temp_dir)
fp.set_preference('browser.download.folderList', 2)
fp.set_preference('browser.download.manager.showWhenStarting', False)
fp.set_preference('pdfjs.disabled', True)
known_mimes = ['application/vnd.ms-excel',
'application/pdf',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']
fp.set_preference('browser.helperApps.neverAsk.saveToDisk', ','.join(known_mimes))
options = Options()
if cls.sel_settings.get('VIRTUAL_DISPLAY'):
options.add_argument('-headless')
cls.driver = webdriver.Firefox(executable_path=cls.sel_settings.get('GECKO_DRIVER'),
firefox_profile=fp, firefox_options=options)
if cls.sel_settings.get('WEB_BROWSER').upper() == 'CHROME':
options = webdriver.ChromeOptions()
if cls.sel_settings.get('VIRTUAL_DISPLAY'):
options.add_argument('-headless')
options.add_experimental_option('prefs', {
'download.default_directory': cls.full_path_temp_dir,
'download.prompt_for_download': False,
'download.directory_upgrade': True,
'safebrowsing.enabled': True
})
cls.driver = webdriver.Chrome(chrome_options=options)
cls.driver.implicitly_wait(10)
cls.driver.set_window_size(*cls.screen_size)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.full_path_temp_dir)
cls.driver.quit()
super().tearDownClass()
def get_url_by_name(self, url_name, *args, **kwargs):
return request.urljoin(self.live_server_url, reverse(url_name, args=args, kwargs=kwargs))
def goto(self, url_name, *args, **kwargs):
url = self.get_url_by_name(url_name, *args, **kwargs)
self.driver.get(url)
def fill_by_id(self, field_id, value):
field = self.driver.find_element_by_id(field_id)
field.clear()
field.send_keys(value)
def login(self, username, password='password123'):
self.goto('login')
self.fill_by_id('id_username', username)
self.fill_by_id('id_password', password)
self.click_on('post_login_btn')
def click_on(self, id_):
self.driver.find_element_by_id(id_).click()
def get_element(self, id_):
return self.driver.find_element_by_id(id_)
def get_element_text(self, id_):
return self.get_element(id_).text
def assertElementTextEqualInt(self, id_, value):
self.assertEqual(int(self.get_element_text(id_)), value)
def assertElementTextEqual(self, id_, text):
self.assertEqual(self.get_element_text(id_), text)
def assertCurrentUrl(self, url_name, *args, **kwargs):
self.assertEqual(
self.driver.current_url,
self.get_url_by_name(url_name, *args, **kwargs)
)
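    # Editor's note: the functional tests below compose these helpers:
    # login() to authenticate, goto() to open a named URL, click_on() and
    # fill_by_id() to interact, then assertCurrentUrl() to verify navigation.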
class FunctionalTest(SeleniumTestCase, BusinessMixin):
def test_01_scenario_modifier_periode_encoding(self):
user = self.create_super_user()
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarFactory.build(academic_year=academic_year)
academic_calendar.save()
self.login(user.username)
self.goto('academic_calendar_read', academic_calendar_id=academic_calendar.id)
self.click_on('bt_academic_calendar_edit')
new_date = academic_calendar.start_date - datetime.timedelta(days=5)
new_date_str = new_date.strftime('%m/%d/%Y')
self.fill_by_id('txt_start_date', new_date_str)
self.driver.execute_script("scroll(0, 250)")
self.click_on('bt_academic_calendar_save')
        self.assertCurrentUrl('academic_calendar_form', academic_calendar_id=academic_calendar.id)
self.assertElementTextEqual('ac_start_date', new_date_str)
def test_01_scenario_modifier_period_encoding_date_fin(self):
user = self.create_super_user()
academic_year = AcademicYearFactory(year=pendulum.today().year-1)
academic_calendar = AcademicCalendarFactory.build(academic_year=academic_year)
academic_calendar.save()
self.login(user.username)
self.goto('academic_calendar_read', academic_calendar_id=academic_calendar.id)
self.click_on('bt_academic_calendar_edit')
new_date = academic_calendar.end_date + datetime.timedelta(days=5)
new_date_str = new_date.strftime('%m/%d/%Y')
self.fill_by_id('txt_end_date', new_date_str)
self.driver.execute_script("scroll(0, 250)")
self.click_on('bt_academic_calendar_save')
self.assertCurrentUrl('academic_calendar_form', academic_calendar_id=academic_calendar.id)
|
nextml/NEXT
|
apps/PoolBasedTripletMDS/algs/ValidationSampling/utilsMDS.py
|
Python
|
apache-2.0
| 14,387
| 0.018002
|
"""
utilsMDS.py
author: Kevin Jamieson (kevin.g.jamieson@gmail.com)
edited: 1/18/15
This module has methods that assist with non-metric multidimensional scaling.
If you're trying to COMPUTE an embedding, you might simply call:
X,emp_loss = computeEmbedding(n,d,S)
You may also consider getLoss to check how well an embedding is performing.
"""
from numpy import *
from numpy.random import *
import numpy.random
from numpy.linalg import *
#eig = numpy.linalg
norm = linalg.norm
floor = math.floor
ceil = math.ceil
import time
def main():
"""
Example of Usage
Creates some fake data and finds an embedding
"""
# generate some fake data
n = 30
d = 2
m = int(ceil(40*n*d*log(n))) # number of labels
p = 0.1; # error rate
Strain = []
Stest = []
Xtrue = randn(n,d);
for iter in range(0,m):
# get random triplet
q,score = getRandomQuery(Xtrue)
# align it so it agrees with Xtrue: "q[2] is more similar to q[0] than q[1]"
query_ordering_disagrees_with_Xtrue = score<0
if query_ordering_disagrees_with_Xtrue:
q = [ q[i] for i in [1,0,2]]
# add some noise
R = rand()
if R<p:
q = [ q[i] for i in [1,0,2]]
if iter < .9*m:
Strain.append(q)
else:
Stest.append(q)
# compute embedding
X,emp_loss_train = computeEmbedding(n,d,Strain,num_random_restarts=2,epsilon=0.01,verbose=True)
# compute loss on test set
emp_loss_test,hinge_loss_test = getLoss(X,Stest)
print
print 'Training loss = %f, Test loss = %f' %(emp_loss_train,emp_loss_test)
def getRandomQuery(X):
"""
Outputs a triplet [i,j,k] chosen uniformly at random from all possible triplets
and score = abs( ||x_i - x_k||^2 - ||x_j - x_k||^2 )
Inputs:
(numpy.ndarray) X : matrix from which n is extracted from and score is derived
Outputs:
[(int) i, (int) j, (int) k] q : where k in [n], i in [n]-k, j in [n]-k-j
    (float) score : signed distance to current solution (positive if it agrees, negative otherwise)
Usage:
q,score = getRandomQuery(X)
"""
n,d = X.shape
while True:
        i = randint(n)
j = randint(n)
k = randint(n)
if i != j and j != k and k != i:
break
q = [i, j, k]
score = getTripletScore(X,q)
return q,score
def getTripletScore(X,q):
"""
Given X,q=[i,j,k] returns score = ||x_j - x_k||^2 - ||x_i - x_k||^2
If score > 0 then the triplet agrees with the embedding, otherwise it does not
Usage:
score = getTripletScore(X,[3,4,5])
"""
i,j,k = q
return dot(X[j],X[j]) -2*dot(X[j],X[k]) + 2*dot(X[i],X[k]) - dot(X[i],X[i])
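# Editor's sketch: a quick, import-safe numeric check of getTripletScore.
# With x_0=0, x_1=2, x_2=0 and q=[0,1,2]:
#   ||x_1 - x_2||^2 - ||x_0 - x_2||^2 = 4 - 0 = 4 > 0, so q agrees with X.
assert getTripletScore(array([[0.], [2.], [0.]]), [0, 1, 2]) == 4.0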
def getLoss(X,S):
"""
Returns loss on X with respect to list of triplets S: 1/len(S) \sum_{q in S} loss(X,q).
Intuitively, q=[i,j,k] "agrees" with X if ||x_j - x_k||^2 > ||x_i - x_k||^2.
For q=[i,j,k], let s(X,q) = ||x_j - x_k||^2 - ||x_i - x_k||^2
If loss is hinge_loss then loss(X,q) = max(0,1-s(X,q))
If loss is emp_loss then loss(X,q) = 1 if s(X,q)<0, and 0 otherwise
Usage:
emp_loss, hinge_loss = getLoss(X,S)
"""
n = X.shape[0]
d = X.shape[1]
emp_loss = 0 # 0/1 loss
hinge_loss = 0 # hinge loss
for q in S:
loss_ijk = getTripletScore(X,q)
hinge_loss = hinge_loss + max(0,1. - loss_ijk)
if loss_ijk < 0:
emp_loss = emp_loss + 1.
emp_loss = emp_loss/len(S)
hinge_loss = hinge_loss/len(S)
return emp_loss, hinge_loss
def getGradient(X,S):
"""
Returns normalized gradient of hinge loss wrt to X and S.
Intuitively, q=[i,j,k] "agrees" with X if ||x_j - x_k||^2 > ||x_i - x_k||^2.
For q=[i,j,k], let s(X,q) = ||x_j - x_k||^2 - ||x_i - x_k||^2
If loss is hinge_loss then loss(X,q) = max(0,1-s(X,q))
Usage:
G,avg_grad_row_norm_sq,max_grad_row_norm_sq,avg_row_norm_sq = getGradient(X,S)
"""
n,d = X.shape
m = len(S)
# pattern for computing gradient
H = mat([[2.,0.,-2.],[ 0., -2., 2.],[ -2., 2., 0.]])
# compute gradient
G = zeros((n,d))
for q in S:
score = getTripletScore(X,q)
if 1.-score>0:
grad_partial = dot(H,X[q,:])/m
G[q,:] = G[q,:] + grad_partial
# compute statistics about gradient used for stopping conditions
mu = mean(X,0)
avg_row_norm_sq = 0.
avg_grad_row_norm_sq = 0.
max_grad_row_norm_sq = 0.
norm_grad_sq_0 = 0.
for i in range(n):
row_norm_sq = 0
grad_row_norm_sq = 0
for j in range(d):
row_norm_sq += (X[i,j]-mu[j])*(X[i,j]-mu[j])
grad_row_norm_sq += G[i,j]*G[i,j]
avg_row_norm_sq += row_norm_sq/n
avg_grad_row_norm_sq += grad_row_norm_sq/n
max_grad_row_norm_sq = max(max_grad_row_norm_sq,grad_row_norm_sq)
return G,avg_grad_row_norm_sq,max_grad_row_norm_sq,avg_row_norm_sq
def computeEmbedding(n,d,S,num_random_restarts=0,max_num_passes=0,max_iter_GD=0,max_norm=0,epsilon=0.01,verbose=False):
"""
    Computes an embedding of n objects in d dimensions using the triplets of S.
S is a list of triplets such that for each q in S, q = [i,j,k] means that
object k should be closer to i than j.
Inputs:
(int) n : number of objects in embedding
(int) d : desired dimension
(list [(int) i, (int) j,(int) k]) S : list of triplets, i,j,k must be in [n].
(int) num_random_restarts : number of random restarts (nonconvex
optimization, may converge to local minima). E.g., 9 random restarts
means take the best of 10 runs of the optimization routine.
(int) max_num_passes : maximum number of passes over data SGD makes before proceeding to GD (default equals 16)
(int) max_iter_GD: maximum number of GD iteration (default equals 50)
(float) max_norm : the maximum allowed norm of any one object (default equals 10*d)
(float) epsilon : parameter that controls stopping condition, smaller means more accurate (default = 0.01)
(boolean) verbose : outputs some progress (default equals False)
Outputs:
(numpy.ndarray) X : output embedding
(float) gamma : Equal to a/b where a is max row norm of the gradient matrix and b is the avg row norm of the centered embedding matrix X. This is a means to determine how close the current solution is to the "best" solution.
"""
if max_num_passes==0:
max_num_passes_SGD = 16
else:
max_num_passes_SGD = max_num_passes
if max_iter_GD ==0:
max_iter_GD = 50
X_old = None
emp_loss_old = float('inf')
num_restarts = -1
while num_restarts < num_random_restarts:
num_restarts += 1
ts = time.time()
X,acc = computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=max_num_passes_SGD,max_norm=max_norm,epsilon=epsilon,verbose=verbose)
te_sgd = time.time()-ts
ts = time.time()
X_new,emp_loss_new,hinge_loss_new,acc_new = computeEmbeddingWithGD(X,S,max_iters=max_iter_GD,max_norm=max_norm,epsilon=epsilon,verbose=verbose)
te_gd = time.time()-ts
if emp_loss_new<emp_loss_old:
X_old = X_new
emp_loss_old = emp_loss_new
if verbose:
print "restart %d: emp_loss = %f, hinge_loss = %f, duration=%f+%f" %(num_restarts,emp_loss_new,hinge_loss_new,te_sgd,te_gd)
return X_old,emp_loss_old
def computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=0,max_norm=0,epsilon=0.01,a0=0.1,verbose=False):
"""
Performs epochSGD where step size is constant across each epoch, epochs are
doubling in size, and step sizes are getting cut in half after each epoch.
This has the effect of having a step size decreasing like 1/T. a0 defines
the initial step size on the first epoch.
S is a list of triplets such that for each q in S, q = [i,j,k] means that
object k should be closer to i than j.
Inputs:
(int) n : number of objects in embedding
        (int) d : desired dimension
|
lia2790/grasp_learning
|
python/simple_batch_splitter.py
|
Python
|
bsd-3-clause
| 913
| 0.004381
|
#!/usr/bin/env python
from mvbb.box_db import MVBBLoader
import multiprocessing, subprocess
from multiprocessing import Pool
import sys
from plugins import soft_hand
def grasp_boxes(filename):
subprocess.call(['python', './grasp_boxes_batch.py', filename])
if __name__ == '__main__':
try:
import os.path
filename = os.path.splitext(sys.argv[1])[0]
except:
        filename = 'box_db'
if not os.path.isfile(filename+'.csv'):
print "Error: file", filename, "doesn't exist"
exit()
try:
n_dofs = int(sys.argv[2])
n_l = int(sys.argv[3])
except:
n_dofs = soft_hand.numJoints
n_l = len(soft_hand.links_to_check)
# for SoftHand
    box_db = MVBBLoader(filename, n_dofs, n_l)
filenames = box_db.split_db()
p = Pool(multiprocessing.cpu_count())
p.map(grasp_boxes, filenames)
box_db.join_results(filenames)
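    # Editor's note (assumed invocation): argv[1] is the db file stem and
    # argv[2]/argv[3] override the DOF and link counts, e.g.
    #   python simple_batch_splitter.py box_db 19 5
    # with soft_hand's defaults used when the extras are omitted.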
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
|
Python
|
bsd-3-clause
| 2,300
| 0
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.models.test_results import TestResult
class TestResultsTest(unittest.TestCase):
def test_defaults(self):
result = TestResult("foo")
self.assertEqual(result.test_name, 'foo')
self.assertEqual(result.failures, [])
self.assertEqual(result.test_run_time, 0)
def test_loads(self):
result = TestResult(test_name='foo',
failures=[],
test_run_time=1.1)
s = result.dumps()
new_result = TestResult.loads(s)
self.assertIsInstance(new_result, TestResult)
self.assertEqual(new_result, result)
# Also check that != is implemented.
self.assertFalse(new_result != result)
|
abitofalchemy/hrg_nets
|
karate_chop.py
|
Python
|
gpl-3.0
| 4,865
| 0.019527
|
import random
import math
import collections
import tree_decomposition as td
import create_production_rules as pr
import graph_sampler as gs
import stochastic_growth
import probabilistic_growth
import net_metrics
import matplotlib.pyplot as plt
import product
import networkx as nx
import numpy as np
import snap
#G = snap.GenRndGnm(snap.PUNGraph, 10000, 5000)
#G = nx.grid_2d_graph(4,4)
#line
#G = nx.Graph()
#G.add_edge(1, 2)
#G.add_edge(2, 3)
#G.add_edge(3, 4)
#G.add_edge(4, 5)
#G.add_edge(5, 6)
#G.add_edge(6, 7)
#G.add_edge(7, 8)
#G.add_edge(8, 9)
#G.add_edge(9, 10)
#G.add_edge(10, 1) #circle
#G = nx.star_graph(6)
#G = nx.ladder_graph(10)
#G = nx.karate_club_graph()
#nx.write_edgelist((G.to_directed()), '../demo_graphs/karate.txt', comments="#", delimiter=' ', data=False)
#exit()
#G = nx.barabasi_albert_graph(1000,3)
#G = nx.connected_watts_strogatz_graph(200,8,.2)
#G = nx.read_edgelist("../demo_graphs/as20000102.txt")
G = nx.read_edgelist("../demo_graphs/CA-GrQc.txt")
#G = nx.read_edgelist("../demo_graphs/Email-Enron.txt")
#G = nx.read_edgelist("../demo_graphs/Brightkite_edges.txt")
G= list(nx.connected_component_subgraphs(G))[0]
##board example
#G = nx.Graph()
#G.add_edge(1, 2)
#G.add_edge(2, 3)
#G.add_edge(2, 4)
#G.add_edge(3, 4)
#G.add_edge(3, 5)
#G.add_edge(4, 6)
#G.add_edge(5, 6)
#G.add_edge(1, 5)
# print G.number_of_nodes()
num_nodes = G.number_of_nodes()
print num_nodes
print
print "--------------------"
print "------- Edges ------"
print "--------------------"
num_edges = G.number_of_edges()
print num_edges
#print
#print "--------------------"
#print "------ Cliques -----"
#print "--------------------"
#print list(nx.find_cliques(G))
if not nx.is_connected(G):
print "Graph must be connected";
exit()
G.remove_edges_from(G.selfloop_edges())
if G.number_of_selfloops() > 0:
print "Graph must be not contain self-loops";
exit()
Ggl = gs.subgraphs_cnt(G,100)
setlendf = []
if num_nodes>400:
#for i in range(0,10):
# setlen = []
# for i in range(10,510, 20):
for Gprime in gs.rwr_sample(G, 10, 500):
pr.prod_rules = {}
T = td.quickbb(Gprime)
prod_rules = pr.learn_production_rules(Gprime, T)
# setlen.append(len(rule_probabilities))
print prod_rules
else:
T = td.quickbb(G)
prod_rules = pr.learn_production_rules(G, T)
print "Rule Induction
|
Complete"
exit()
Gergm = []
Gergmgl = []
for run in range(1, 3):
f = open('../demo_graphs/ergm_sim/enron/data '+str(run)+' .csv', 'r')
E=nx.Graph()
header = 0
for line in f:
line=line.rstrip()
if header == 0:
header+=1
continue
        c = line.split("\t")
        if len(c) != 3: continue
E.add_edge(int(c[1]),int(c[2]))
if int(c[1]) > num_nodes or int(c[2]) > num_nodes:
continue
Gergm.append(E)
print "G ergm iteration " + str(run) + " of 20"
Gergmgl.append(gs.subgraphs_cnt(E,50))
k = int(math.floor(math.log(num_nodes, 10)))
P = [[.9716,.658],[.5684,.1256]] #karate
P = [[.8581,.5116],[.5063,.2071]] #as20000102
#P = [[.7317,.5533],[.5354,.2857]] #dblp
#P = [[.9031,.5793],[.5051,.2136]] #ca-grqc
#P = [[.9124,.5884],[.5029,.2165]] #enron
P = [[.8884,.5908],[.5628,.2736]] #brightkite
Gkron = product.kronecker_random_graph(k,P).to_undirected()
print("GKron finished")
sum = .9716+.5382+.5684+.1256 #karate
#sum = .8581+.5116+.5063+.2071 #as20000102
#sum = .7317+.5533+.5354+.2857 # dblp
#sum = .9031+.5793+.5051+.2136 #ca-grqc
#sum = .9124+.5884+.5029+.2165 #enron
sum = .8884+.5908+.5628+.2736 #brightkite
GRmatSNAP = snap.GenRMat(num_nodes, num_edges, P[0][0]/sum, P[0][1]/sum, P[1][0]/sum)
GRmat = nx.Graph()
for EI in GRmatSNAP.Edges():
GRmat.add_edge(EI.GetSrcNId(), EI.GetDstNId())
print("GRMAT finished")
GRmatgl = gs.subgraphs_cnt(GRmat,100)
n_distribution = {}
Gstar = []
Dstar = []
Gstargl = []
for run in range(0, 20):
nG, nD = stochastic_growth.grow(prod_rules, num_nodes/10,0)#num_nodes/50)
Gstar.append(nG)
Dstar.append(nD)
Gstargl.append(gs.subgraphs_cnt(nG,100))
#Gstar.append(probabilistic_growth.grow(rule_probabilities,prod_rule_set, num_nodes))
print "G* iteration " + str(run) + " of 20"
print(nD)
print ""
print "G* Samples Complete"
label = "AS"
net_metrics.draw_graphlet_plot(Ggl, Gstargl, Gergmgl, Gkron, GRmatgl, label, plt.figure())
exit()
net_metrics.draw_diam_plot(G, Dstar, Gergm, Gkron, GRmat, label, plt.figure())
net_metrics.draw_degree_rank_plot(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
#net_metrics.draw_scree_plot(G, Gstar, label, ax1)
net_metrics.draw_network_value(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
net_metrics.draw_hop_plot(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
#ax1.plot(ef.mean().index, ef.mean()[1],'b')
net_metrics.save_plot_figure_2disk()
|
Bolt64/proxy_switcher
|
proxy_autoconfig.py
|
Python
|
mit
| 3,103
| 0.010957
|
#!/usr/bin/env python
## Some necessary imports
from __future__ import print_function
from commands import getoutput
from time import sleep
from os.path import expanduser
import os
import re
from datetime import datetime
import process_lock as pl
###
## Configuration options
script_location = os.path.dirname(os.path.realpath(__file__))
proxy_ssid = ["iiscwlan", "opbwlan"] # Add whatever SSIDs you want to use the proxy for
proxy_set_script = "bash {0}/iisc_proxy_set.sh".format(script_location) # The script you want to run to turn on proxy
proxy_unset_script = "bash {0}/proxy_unset.sh".format(script_location) # The script to turn off proxy
checking_interval = 2 # The checking frequency in seconds.
default_log_file = expanduser("~/.proxy_log") # Where the logging will happen.
ssid_matcher=re.compile("ESSID:\"[\w]*\"") # A regular expression to match to the output of iwconfig.
ssid_slice=slice(7, -1)
## Logs the string to the log file and stdout.
def log_output(string, log_file=default_log_file):
now = datetime.now()
timestamped_string = "[{0}:{1}:{2}-{3}/{4}/{5}] {6}".format(now.hour, now.minute, now.second, now.day, now.month, now.year, string)
file_to_write = open(log_file, "a")
file_to_write.write(timestamped_string)
print(timestamped_string, end="")
file_to_write.close()
###
def set_proxy():
log_output(str(getoutput(proxy_set_script))+'\n')
log_output(str(getoutput("cp {0}/proxy_settings_iiscwlan ~/.current_proxy".format(script_location)))+'\n')
def unset_proxy():
log_output(str(getoutput(proxy_unset_script))+'\n')
log_output(str(getoutput("cp {0}/proxy_settings_other ~/.current_proxy".format(script_location)))+'\n')
def get_ssid():
out=getoutput('/sbin/iwconfig')
result=ssid_matcher.search(out)
if result:
return result.string[result.start():result.end()][ssid_slice]
else:
return None
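# Editor's worked example: against iwconfig output such as
#   wlan0  IEEE 802.11  ESSID:"iiscwlan"  ...
# ssid_matcher matches 'ESSID:"iiscwlan"' and ssid_slice (7, -1) trims it
# down to 'iiscwlan'.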
def main(interval=2):
current_ssid=get_ssid()
if current_ssid and current_ssid in proxy_ssid:
log_output("Detected proxy network. Trying to set proxy.\n")
set_proxy()
else:
log_output("WiFi off or non-proxy network detected. Trying to unset proxy.\n")
unset_proxy()
while True:
if not current_ssid:
log_output("WiFi is off. Doing nothing.\n")
else:
log_output("WiFi is on. Current ssid is {0}.\n".format(current_ssid))
sleep(interval)
new_ssid=get_ssid()
if new_ssid!=current_ssid:
if new_ssid and new_ssid in proxy_ssid:
log_output("Proxy network detected. Trying to set proxy.\n")
set_proxy()
else:
log_output("WiFi off or non-proxy network detected.\n")
unset_proxy()
current_ssid=new_ssid
if __name__=="__main__":
try:
import psutil
pid = os.getpid()
if not pl.process_is_running("proxy_autoconfig", [pid]):
main(checking_interval)
else:
print("Process already running.")
except ImportError:
main(checking_interval)
|
smorante/continuous-goal-directed-actions
|
simulated-CGDA/generalization/generalization_old_test2.py
|
Python
|
mit
| 7,027
| 0.021346
|
from __future__ import division
import itertools
from sklearn import mixture, metrics
from sklearn.cluster import DBSCAN
from scipy import linalg
from scipy.spatial import distance
import pylab as pl
import matplotlib as mpl
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
import csv
import numpy as np
# reading file
for action in ['wax', 'rotate', 'move', 'fold', 'paint']:
actionName=action
print "Action: ", actionName
# 6
reader=csv.reader(open("/home/santi/Repositories/cognitive/xgnitive/main/app/record/recorded3/"+actionName+"_6/data.log","rb"),delimiter=' ')
x=list(reader)
temp4=np.array(x).astype('float')
# Get the time range and rescale
# change made here
r = float(temp4[-1][1] - temp4[0][1])
temp4[:,1] = map(lambda x: (x - temp4[0][1]) / r, temp4[:,1])
##normalize (optional)
#temp4 /= np.max(np.abs(temp4), axis=0)
###########################################
######## Theoretical Normalization #########
## locX0 locY0 locZ0 area hue sat val angle
############################################
## spatial
## x
#temp4[:,2] /= 5000
## y
#temp4[:,3] /= 2000
## z
#temp4[:,4] /= 2000
## area
#temp4[:,5] /= 307200
## hue
#temp4[:,6] /= 180
## sat
#temp4[:,7] /= 255
## val
#temp4[:,8] /= 255
##angle
#temp4[:,9] /= 180
###
realDataMatrix= np.vstack([temp4])
# deletes first column (only -1 values)
realDataMatrix= np.delete(realDataMatrix,0,axis=1)
## bad way to delete last 8 columns
for d in range(8):
realDataMatrix = np.delete(realDataMatrix,9,axis=1)
#if: test all dimensions
Xnoisy = realDataMatrix # noisy dataset
#Xnoisy = sorted(Xnoisy, key=lambda column: column[1])
X=[]
##else: choose dimensions to be shown (dimOne=time, dimTwo=feature to measure)
#dimOne = realDataMatrix[:,0]
#dimTwo = realDataMatrix[:,1]
#Xnoisy = np.array([dimOne,dimTwo]).T # noisy dataset
#X=[] # future clean dataset
# Compute similarities
D = distance.squareform(distance.pdist(Xnoisy))
S = 1 - (D / np.max(D))
# Compute DBSCAN
db = DBSCAN(eps=0.001, min_samples=10, metric='cosine').fit(S)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
# Plotting DBSCAN (but also outlier detection)
core_samples = db.core_sample_indices_
unique_labels = set(labels)
preplot = pl.subplot(4, 1, 1)
colors = pl.cm.Blues(np.linspace(0, 1, len(unique_labels)))
outliers=[]
for k, col in zip(unique_labels, colors):
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
if index in core_samples and k != -1:
markersize = 8
X.append(Xnoisy[index])
pl.plot(Xnoisy[index][0], Xnoisy[index][1],'o', markerfacecolor=col, markeredgecolor='k', markersize=markersize)
else:
markersize = 3
pl.plot(Xnoisy[index][0], Xnoisy[index][1],'o', markerfacecolor='k', markeredgecolor='k', markersize=markersize)
if not X:
X=realDataMatrix #change here! to avoid null list
pl.xticks(())
pl.yticks(())
pl.title('DBSCAN. Estimated clusters: %d' % n_clusters_, size=20)
#assigning new clean dataset to variable X in numpy array
X = np.array(X)
# Initializing BIC parameters
lowest_bic = np.infty
bic = []
# choose number of clusters to test
if n_clusters_ <2:
componentToTest=3
else:
componentToTest=2*n_clusters_
print "Maximum components tested: ", componentToTest
n_components_range = range(1, componentToTest+1)
# this is a loop to test every component, choosing the lowest BIC at the end
for n_components in n_components_range:
# Fit a mixture of gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type='full')
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
# over loading if compoenents = 1
print best_gmm
if len(best_gmm.means_)==1:
best_gmm = mixture.GMM(n_components=2, covariance_type='full')
best_gmm.fit(X)
## OVERLOAD A ELIMINAR
#best_gmm = mixture.GMM(n_components=12, covariance_type='full')
#best_gmm.fit(X)
# array of BIC for the graphic table column
bic = np.array(bic)
# one tested all components, here we choose the best
clf = best_gmm
print "Best result: ", clf
print 'Means: ', np.round(clf.means_,4)
## Plot the BIC scores
#bars = []
#spl = pl.subplot(4, 1, 2)
#xpos = np.array(n_components_range) - 0.1
#bars.append(pl.bar(xpos, bic[0:len(n_components_range)], width=.2, color='c'))
#pl.xticks(n_components_range, size=15)
#pl.yticks(([bic.min() * 1.01 - .01 * bic.max(), bic.max()]), size=12)
#pl.title('BIC Score', size=20)
#spl.set_xlabel('Number of components', size=15)
## Plot the winner
#splot = pl.subplot(4, 1, 3)
#Y_ = clf.predict(X)
#for i, (mean, covar) in enumerate(zip(clf.means_, clf.covars_)):
#v, w = linalg.eigh(covar)
#if not np.any(Y_ == i):
#continue
##pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 8, color='black')
#pl.plot(X[Y_ == i, 0], X[Y_ == i, 1], 'o', markerfacecolor='black', markeredgecolor='k', markersize=5)
## Plot an ellipse to show the Gaussian component
#angle = np.arctan2(w[0][1], w[0][0])
#angle = 180 * angle / np.pi # convert to degrees
#v *= 4
#ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color='b')
#ell.set_clip_box(splot.bbox)
#ell.set_alpha(.6)
#splot.add_artist(ell)
#pl.xticks(())
#pl.yticks(())
#pl.title('GMM-BIC. Components: ' + str(len(clf.means_)), size=20)
## saving centers
sortedPoints = sorted(clf.means_, key=lambda point: point[0])
np.savetxt("generalized/"+actionName+"Query", sortedPoints, fmt='%.14e')
## plot interpolation
#meansX, meansY = zip(*clf.means_)
#if len(meansX) > 1:
    #minimTime=min(meansX)
#maximTime=max(meansX)
#print minimTime, maximTime
#xi = np.linspace(minimTime, maximTime, 10*len(meansX))
#testrbf = Rbf(meansX, meansY, function='gaussian')
#yi = testrbf(xi)
#pl.subplot(4, 1, 4)
#pl.plot(xi, yi, 'g')
#pl.scatter(meansX, meansY,20, color='blue')
#pl.xticks(())
#pl.yticks(())
#pl.title('RBF Interpolation', size=20)
#pl.subplots_adjust(hspace=.8, bottom=0.05)
#pl.show()
#else:
#pl.show()
#
|
mintuhouse/shotfactory
|
shotfactory04/gui/linux/navigator.py
|
Python
|
gpl-3.0
| 2,507
| 0
|
# browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for X11.
"""
__revision__ = "$Rev: 2749 $"
__date__ = "$Date: 2008-04-08 20:43:21 +0530 (Tue, 08 Apr 2008) $"
__author__ = "$Author: hawk $"
import os
import time
from shotfactory04.gui import linux as base
class Gui(base.Gui):
"""
Special functions for Netscape Navigator.
"""
def reset_browser(self):
"""
Delete crash dialog and browser cache.
"""
home = os.environ['HOME']
self.delete_if_exists(os.path.join(home, '.netscape', 'cache'))
self.delete_if_exists(os.path.join(home, '.netscape', 'history.dat'))
self.delete_if_exists(os.path.join(home, '.netscape', 'cookies'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'Cache'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'sessionstore.js'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'history.dat'))
self.delete_if_exists(os.path.join(
home, '.netscape', 'navigator', '*', 'cookies.txt'))
def reuse_browser(self, config, url, options):
"""
Open a new URL in the same browser window.
"""
command = config['command'] or config['browser'].lower()
command = '%s -remote "OpenURL(%s,new-tab)"' % (command, url)
print "Running", command
error = self.shell(command)
if error:
raise RuntimeError("could not load new URL in the browser")
print "Sleeping %d seconds while page is loading." % (
options.reuse_wait)
time.sleep(options.reuse_wait / 2.0)
self.maximize()
time.sleep(options.reuse_wait / 2.0)
|
nico202/pyUniSR
|
setup.py
|
Python
|
gpl-2.0
| 919
| 0.020675
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "pyUniSR",
version = "0.0.7",
author = "Nicolo Balzarotti",
author_email = "anothersms@gmail.com",
    description = ("Python class to access studenti.unisr.it (University Vita-Salute San Raffaele, Milano)"),
license = "GPLv2",
keywords = "unisr class milano university raffaele",
url = "https://github.com/nico202/pyUniSR",
packages=['UniSR'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
)
|
utisam/gfly
|
gfly/__init__.py
|
Python
|
gpl-3.0
| 4,933
| 0.036084
|
#-*- coding:utf-8 -*-
import string
from gi.repository import GObject, Gedit, Gtk, Pango
from settings import errorGenerator, jump_to_error_key, notification
ui_str = """<ui>
<menubar name="MenuBar">
<menu name="EditMenu" action="Edit">
<placeholder name="EditOps_6">
<menuitem name="gfly" action="gfly"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
def getLineStartToEnd(doc, line):
""" get two Gtk.TextIter, start and end of line
Attribute:
line: integer of line number(start 0)
"""
s = doc.get_iter_at_line(line)
e = s.copy()
e.forward_line()
return s, e
def skipWhiteSpaces(itr):
""" skip white spaces of Gtk.TextIter
"""
while itr.get_char() in string.whitespace and itr.forward_char():
pass
return itr
def getLanguageName(doc):
""" get document's languageName
Attribute:
doc: GeditDocument
"""
lang = doc.get_language()
if lang:
return lang.get_name()
class TabWatch:
def __init__(self, window):
self.errorTag = None
self.currentConnectedTab = None
self.currentConnectedDoc = None
self.geditWindow = window
        #connect window signal
self.currentConnectedTab = window.connect("active_tab_changed", self.__tab_changed)
def close(self, window):
if not self.currentConnectedTab is None:
window.disconnect(self.currentConnectedTab)
self.currentConnectedTab = None
if not self.currentConnectedDoc is None:
window.disconnect(self.currentConnectedDoc)
self.currentConnectedDoc = None
def __tab_changed(self, window, tab):
doc = window.get_active_document()
        #connect window signal
if not self.currentConnectedTab is None:
window.disconnect(self.currentConnectedTab)
self.currentConnectedTab = None
        self.currentConnectedTab = window.connect("active_tab_changed", self.__tab_changed)
#connect document signal
if not self.currentConnectedDoc is None:
window.disconnect(self.currentConnectedDoc)
self.currentConnectedDoc = None
doc.connect("saved", self.__doc_saved)
#connect view signal
tab.get_view().connect_after("move-cursor", self.__move_cursor)
#create tag for error
self.errorTag = doc.get_tag_table().lookup('errorTag')
if self.errorTag is None:
self.errorTag = doc.create_tag('errorTag', underline=Pango.Underline.ERROR)
self.draw_lines(doc)
def __doc_saved(self, doc, *args):
self.draw_lines(doc)
def draw_lines(self, doc):
# clear
s, e = doc.get_bounds()
doc.remove_tag(self.errorTag, s, e)
#generate error and apply new error tag
lang = getLanguageName(doc)
if errorGenerator.has_key(lang):
errorCount = 0
for g in errorGenerator[lang]:
try:
for i in g.generateErrorLines(doc.get_uri_for_display()):
s, e = getLineStartToEnd(doc, i - 1)
doc.apply_tag(self.errorTag, skipWhiteSpaces(s), e)
errorCount += 1
except EnvironmentError:
print "cannot generateErrorLines"
if notification:
            self.errorNotify(errorCount)
    def errorNotify(self, count):
if count <= 0:
return
try:
import pynotify
pynotify.init("gfly_notify")
if count == 1:
n = pynotify.Notification("gfly", "There is one error")
else:
n = pynotify.Notification("gfly", "There are %d error" % count)
n.show()
except ImportError:
pass
def __move_cursor(self, textview, *args):
global errorGenerator
doc = textview.get_buffer()
lang = getLanguageName(doc)
if errorGenerator.has_key(lang):
textview.set_has_tooltip(False)
cursorIter = doc.get_iter_at_mark(doc.get_insert())
cursorLine = cursorIter.get_line()
for g in errorGenerator[lang]:
if g.errorLineMsg.has_key(cursorLine + 1):
textview.set_has_tooltip(True)
textview.set_tooltip_text(g.errorLineMsg[cursorLine + 1])
def jump_error(self):
view = self.geditWindow.get_active_view()
doc = view.get_buffer()
lang = getLanguageName(doc)
if errorGenerator.has_key(lang):
cursorLine = doc.get_iter_at_mark(doc.get_insert()).get_line()
lines = []
for g in errorGenerator[lang]:
lines.extend(g.errorLineMsg.keys())
if len(lines) != 0:
lines.sort()
for i in lines:
if cursorLine < i - 1:
doc.goto_line(i - 1)
view.scroll_to_cursor()
return
doc.goto_line(lines[0] - 1)
view.scroll_to_cursor()
class gfly(GObject.Object, Gedit.WindowActivatable):
__gtype_name__ = "gfly"
window = GObject.property(type=Gedit.Window)
def __init__(self):
GObject.Object.__init__(self)
def do_activate(self):
global ui_str
self.tabwatch = TabWatch(self.window)
manager = self.window.get_ui_manager()
self.action_group = Gtk.ActionGroup("gflyPluginAction")
self.action_group.add_actions([("gfly", None, "Jump Error", jump_to_error_key, None, self.__jump_error)])
manager.insert_action_group(self.action_group, -1)
self.ui_id = manager.add_ui_from_string(ui_str)
def do_deactivate(self):
self.tabwatch.close(self.window)
def do_update_state(self):
pass
def __jump_error(self, action):
self.tabwatch.jump_error()
|
acasadoquijada/Telegram-bot-stuff
|
Stuff/new_users_saver.py
|
Python
|
gpl-2.0
| 825
| 0.008485
|
################################################################################
# new_users_saver function
################################################################################
def newusers(m):
dict_updater()
un = m.from_user.username
if un not in DBDIC:
uid = m.from_user.id
DBDIC[un] = [uid,0]
if hasattr(m, 'new_chat_participant'):
un = m.new_chat_participant.username
        if un not in DBDIC:
            uid = m.new_chat_participant.id
DBDIC[un] = [uid,0]
dict_saver()
################################################################################
# "newusers" saves new users in the dictionary
# (see dict_updater_saver.py for "dict_updater()" and "dict_saver()")
################################################################################
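################################################################################
# A minimal usage sketch (hypothetical message object; DBDIC, dict_updater()
# and dict_saver() are provided by dict_updater_saver.py, so this only runs
# in that context):
#
#     class Msg(object): pass
#     m = Msg(); m.from_user = Msg()
#     m.from_user.username = 'alice'; m.from_user.id = 42
#     newusers(m)   # afterwards DBDIC['alice'] == [42, 0]
################################################################################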
|
stefanpeidli/GoNet
|
Filters.py
|
Python
|
mit
| 15,577
| 0.004365
|
"""
@author: Stefan Peidli
License: MIT
Tags: Neural Network
"""
import numpy as np
from Board import Board
n = 9
# Testboards
def gen_test_board(method=0):
if method == 0:
b = np.zeros((n, n))
b[0, 2] = 1
b[1, 3] = 1
b[3, 3] = 1
b[2, 3] = -1
b[0, 1] = -1
b[1, 0] = -1
b[1, 1] = -1
b[2, 2] = 1
if method == 1:
b = np.round(np.random.uniform(-1, 1, (n, n)), 0)
return b
gen_test_board(1)
dxdys = [(1, 0), (0, 1), (-1, 0), (0, -1)]
# help functions
def is_on_board(n, x, y):
return 0 <= x < n and 0 <= y < n
# TODO: there is a bug somewhere in here; the liberty counts are sometimes wrong
def give_group_at_position(board, start_x, start_y):
group = [(start_x, start_y)]
checked = []
i = 0
liberts = 0
while i < len(group):
x, y = group[i]
i += 1
for dx, dy in dxdys:
adj_x, adj_y = x + dx, y + dy
if is_on_board(board.shape[0], adj_x, adj_y) and not (adj_x, adj_y) in group:
if board[adj_x, adj_y] == 0 and not (adj_x, adj_y) in checked:
liberts += 1
checked.append((adj_x, adj_y))
elif board[adj_x, adj_y] == board[start_x, start_y]:
group.append((adj_x, adj_y))
if board[start_x, start_y] == 0:
liberts = 0
return [group, liberts]
def give_liberties(board, color):
libs = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == color:
[_, li] = give_group_at_position(board, row, col)
libs[row, col] = li
return libs
# Filters
# Filters that are self-mappings
# Eyes . ID = 0
# shows the eyes of player color
def filter_eyes(board, color):
n = board.shape[0]
eyes = np.zeros((n, n))
board = board * color
for row in range(n):
for col in range(n):
if board[row, col] == 0: # only free fields can be eyes
if not(row == 0):
eyes[row, col] += board[row-1,col]
if not(row == n-1):
eyes[row, col] += board[row+1,col]
if not(col == 0):
eyes[row, col] += board[row,col-1]
if not(col == n-1):
eyes[row, col] += board[row,col+1]
eyes[0, :] += 1
eyes[-1, :] += 1
eyes[:, 0] += 1
eyes[:, -1] += 1
eyes[eyes != 4] = 0
eyes = eyes / 4
return eyes
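# Example: with color=1, the corner (0, 0) whose two on-board neighbours
# (0, 1) and (1, 0) hold black stones is detected as an eye; the border
# compensation supplies the two missing neighbours:
#     b = np.zeros((9, 9)); b[0, 1] = 1; b[1, 0] = 1
#     filter_eyes(b, 1)[0, 0]  # -> 1.0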
# Shows which move will result in an eye being created (1) or destroyed (-1) . ID = 1.
# Note: Eyes by capture are created by capturing a single stone
def filter_eyes_create(board, color=1):
board.reshape((9, 9))
n = board.shape[0]
reyes = np.zeros((n, n))
eyc = np.sum(filter_eyes(board, color)) # current eyes
cap = filter_captures(board, color)
for row in range(n):
for col in range(n):
if board[row, col] == 0: # only free fields can be set
                temp = board * 1  # make a copy of the board
temp[row, col] = color
eyn = np.sum(filter_eyes(temp, color)) # eyes by free creation
# actually not good line below: we can also capture two single stones with one move..
if cap[row, col] == 1: # capture one eye
eyn += 1
reyes[row, col] = eyn - eyc
return reyes
# captures ID = 2
# Shows how many stones player "color" (1=b,-1=w) would capture by playing a move on a field
def filter_captures(board, color):
board.reshape((9, 9))
n = board.shape[0]
cap = np.zeros((n, n))
for row in range(n):
for col in range(n):
if board[row, col] == 0: # only free fields can be set
val = 0
if not(row == 0):
if color == board[row-1, col] * -1: # then is enemy
[group, libs] = give_group_at_position(board, row-1, col)
if libs == 1:
val = max(val, len(group))
if not(row == n-1):
if color == board[row+1, col] * -1:
[group, libs] = give_group_at_position(board, row+1, col)
if libs == 1:
val = max(val, len(group))
if not(col == 0):
if color == board[row, col-1] * -1:
[group, libs] = give_group_at_position(board, row, col-1)
if libs == 1:
val = max(val, len(group))
if not(col == n-1):
if color == board[row, col+1] * -1:
[group, libs] = give_group_at_position(board, row, col+1)
if libs == 1:
val = max(val, len(group))
cap[row, col] = val
return cap
# rewards connecting groups and adding liberties to groups. But e.g. punishes playing a move into an own eye. ID = 3.
def filter_add_liberties(board, color):
board.reshape((9, 9))
n = board.shape[0]
libmat = np.zeros((n, n))
for row in range(n):
for col in range(n):
val = 0
if board[row, col] == 0: # only free fields can be set
temp = board * 1 # do not delete this
temp[row, col] = color
[g, li] = give_group_at_position(temp, row, col)
checked = []
neighbours = 0
if not(row == 0):
if color == board[row-1, col]:
[group, libs] = give_group_at_position(board, row-1, col)
val += li - libs
neighbours += 1
checked.extend(group)
if not(row == n-1):
if color == board[row+1, col]:
[group, libs] = give_group_at_position(board, row+1, col)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += li - libs
if not(col == 0):
if color == board[row, col-1]:
[group, libs] = give_group_at_position(board, row, col-1)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += li - libs
if not(col == n-1):
if color == board[row, col+1]:
[group, libs] = give_group_at_position(board, row, col+1)
if group in checked:
libs = 0
else:
neighbours += 1
checked.extend(group)
val += li - libs
libmat[row, col] = val
return libmat
# measures total liberties added if move is played. ID = 4
def filter_liberization(board, color):
board.reshape((9, 9))
n = board.shape[0]
libmat = np.zeros((n, n))
for row in range(n):
for col in range(n):
val = 0
if board[row, col] == 0: # only free fields can be set
temp = board * 1 # do not delete
temp[row, col] = color
[g, li] = give_group_at_position(temp, row, col)
val = li
checked = []
neighbours = 0
                if not(row == 0):
                    if color == board[row-1, col]:
                        [group, libs] = give_group_at_position(board, row-1, col)
val += - libs
neighbours += 1
checked.extend(group)
if not(row == n-1):
if color == board[row+1, col]:
[group, libs
|
kirienko/unseries
|
unseries.py
|
Python
|
gpl-3.0
| 9,529
| 0.002205
|
# encoding: utf8
from sympy import Add
from uncertainties import __version_info__ as uncert_version
from uncertainties import ufloat, ufloat_fromstr
from uncertainties.core import Variable, AffineScalarFunc
if uncert_version < (3, 0):
raise Warning("Your version of uncertanties is not supported. Try\n"
"$ sudo pip install uncertainties --upgrade")
class Series:
"""
The class that provides the expansion in powers of g up to the n-th order,
taking the error into account.
"""
def __init__(self, n, d={0: 0}, name='g', analytic=None):
"""
Example:
`z2 = Series(3, {0: ufloat(-1, 0.4), 1: ufloat(-2, .004), 2: ufloat(999, .1)})`
will give:
Z₂(g) = -1.0(4) - 2.000(4) g + 999.00(10) g²
:param n: number of the "known" orders, `int`
:param d: dictionary with k=powers, v=`ufloat`s
:param name: name of the series variable, arbitrary character, default is `'g'`
:param analytic: boolean
"""
self.n = n
self.gSeries = d
self.name = name
for k, v in d.items():
if isinstance(v, AffineScalarFunc):
self.gSeries[k] = v
elif isinstance(v, (list, tuple)):
self.gSeries[k] = ufloat(v[0], v[1])
elif isinstance(v, str):
self.gSeries[k] = ufloat_fromstr(v)
elif isinstance(v, int):
self.gSeries[k] = v
self.analytic = True
else:
raise TypeError("Series constructor warning: Type(v)={}".format(type(v)))
if analytic is not None:
# XXX: if defined explicitly:
self.analytic = bool(analytic)
else:
# XXX: if all values are ints assume analytic
self.analytic = all(map(lambda x: type(x) == int, d.values()))
for i in range(0, n):
if i not in d.keys():
if self.analytic:
self.gSeries[i] = 0
else:
self.gSeries[i] = ufloat(0, 0)
def __lt__(self, other):
return len(self.gSeries) < len(other.gSeries)
def __add__(self, other):
tmp = dict(self.gSeries)
# print "From __add__:",self.analytic," + ",other.pprint() ## FIXME
if isinstance(other, Series):
stop = min(self.n, other.n)
if stop == 0:
stop = max(self.n, other.n)
for g in other.gSeries.keys():
if g <= stop:
try:
tmp[g] += other.gSeries[g]
except KeyError:
tmp[g] = other.gSeries[g]
elif isinstance(other, (int, float)):
tmp[0] += other
else:
print("{} {}".format(type(self), type(other)))
raise NotImplementedError
return Series(len(tmp), tmp, name=self.name, analytic=self.analytic)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self + (-1) * other
def __mul__(self, other):
tmp = {}
if isinstance(other, Series):
stop = min(self.n, other.n)
for i in self.gSeries.keys():
for j in other.gSeries.keys():
if (i + j) <= stop:
try:
tmp[i + j] += self.gSeries[i] * other.gSeries[j]
except KeyError:
tmp[i + j] = self.gSeries[i] * other.gSeries[j]
res = Series(max(self.n, other.n), tmp, name=self.name, analytic=self.analytic)
elif isinstance(other, (int, float, Variable, AffineScalarFunc, Add)):
for i in self.gSeries.keys():
tmp[i] = self.gSeries[i] * other
res = Series(self.n, tmp, name=self.name, analytic=self.analytic)
elif other == 0 or sum(map(lambda v: v == 0, self.gSeries.values())) == len(self.gSeries):
return 0
# elif isinstance(other, sympy.core.add.Add):
# print "\n\nself=",self
# print "other=",other
# return 0
else:
print("\nself = {}, type(self) = {}".format(self.gSeries, type(self)))
print("\nother = {}, type(other) = {}".format(other, type(other)))
raise NotImplementedError
return res
def __rmul__(self, other):
return self * other
def __neg__(self):
return self * (-1)
def __invert__(self):
""" Z.__invert__() = 1/Z
1/(1+x)=Sum_i (-1)^i x^i
"""
res = Series(self.n, {}, self.name, analytic=self.analytic)
if self.gSeries[0] == 1:
c = 1.
normed_series = self + Series(self.n, {0: -1}, self.name, analytic=self.analytic) # <-- it's -1!
elif self.gSeries[0] != 0:
c = 1. / self.gSeries[0]
normed_series = self / self.gSeries[0] + Series(self.n, {0: -1}, self.name,
analytic=self.analytic) # <-- it's -1!
else:
raise NotImplementedError("no constant term in series: %s" % self.gSeries)
# if self.gSeries[0] == 1:
# tmp = Series(self.gSeries[1:], n = self.n-1, name=self.name)
# for i in range(tmp.n):
for i in range(len(self.gSeries)):
res += (-1) ** i * normed_series ** i
return res * c
def __div__(self, other):
""" For now we assume all the powers of g as non-negative
"""
if isinstance(other, Series):
return self * other.__invert__()
elif isinstance(other, (int, float, Variable, AffineScalarFunc)):
return self * (1. / other)
else:
raise NotImplementedError("type: {}; {}".format(type(other), other.__repr__()))
def __rdiv__(self, other):
return other * self.__invert__()
def __pow__(self, power, modulo=None):
if isinstance(power, int) and power > 1:
return reduce(lambda x, y: x * y, [self] * power)
        elif isinstance(power, int) and power == 1:
return self
elif isinstance(power, int) and power == 0:
if self.analytic:
return Series(self.n, {0: 1}, self.name, analytic=self.analytic)
else:
return Series(self.n, {0: ufloat(1, 0)}, self.name, analytic=self.analytic)
else:
print("power = {}, type(power) = {}".format(power, type(power)))
raise NotImplementedError
def diff(self):
"""
Differentiation of the polynomial in g
"""
res = {}
for i in range(len(self.gSeries) - 1):
res[i] = (i + 1) * self.gSeries[i + 1]
return Series(self.n, res, analytic=self.analytic)
def __repr__(self):
return self.pprint()
## FIXME
def _approx(self, other):
for k, v in self.gSeries.items():
if v != other.gSeries[k]:
return False
return True
def __str__(self):
"""
The result is truncated according to the error, indicating the accuracy of the least significant digit
"""
res = ''
for g, c in self.gSeries.items():
if c != 0 and g == 0 and isinstance(c, int):
res += " %d + " % (c)
elif c != 0 and g == 0:
res += " %s + " % (c.format('S'))
elif c != 0 and g <= self.n and isinstance(c, (Variable, AffineScalarFunc)):
if c.s < 1e-14:
res += "%s * %s**%s + " % (str(c.n), self.name, str(g))
else:
res += " %s * %s**%s + " % (c.format('S'), self.name, str(g))
elif c != 0 and g <= self.n and isinstance(c, (int, float)):
res += "%s * %s**%s + " % (str(c), self.name, str(g))
return res[:-3] or '0'
def coeffs(self):
"""
"""
return map(lambda x: float(x.format('S').split("(")[0]), self.gSeries.values())[:self.n + 1]
def pprint(self):
res = ""
for g, c in self.gSeries.it
|
Tydus/deluge
|
deluge/core/eventmanager.py
|
Python
|
gpl-3.0
| 3,162
| 0.001265
|
#
# eventmanager.py
#
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import logging
import deluge.component as component
log = logging.getLogger(__name__)
class EventManager(component.Component):
def __init__(self):
component.Component.__init__(self, "EventManager")
self.handlers = {}
def emit(self, event):
"""
Emits the event to interested clients.
:param event: DelugeEvent
"""
# Emit the event to the interested clients
component.get("RPCServer").emit_event(event)
        # Call any handlers for the event
if event.name in self.handlers:
            for handler in self.handlers[event.name]:
#log.debug("Running handler %s for event %s with args: %s", event.name, handler, event.args)
try:
handler(*event.args)
except Exception, e:
log.error("Event handler %s failed in %s with exception %s", event.name, handler, e)
def register_event_handler(self, event, handler):
"""
Registers a function to be called when a `:param:event` is emitted.
:param event: str, the event name
:param handler: function, to be called when `:param:event` is emitted
"""
if event not in self.handlers:
self.handlers[event] = []
if handler not in self.handlers[event]:
self.handlers[event].append(handler)
def deregister_event_handler(self, event, handler):
"""
Deregisters an event handler function.
:param event: str, the event name
:param handler: function, currently registered to handle `:param:event`
"""
if event in self.handlers and handler in self.handlers[event]:
self.handlers[event].remove(handler)
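# A minimal self-contained sketch of the same handler-registry pattern
# (plain Python, no deluge imports; the event name is illustrative):
#
#     handlers = {}
#
#     def register(event_name, handler):
#         handlers.setdefault(event_name, []).append(handler)
#
#     def emit(event_name, *args):
#         for handler in handlers.get(event_name, []):
#             handler(*args)
#
#     register("TorrentAddedEvent", lambda torrent_id: None)
#     emit("TorrentAddedEvent", "abc123")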
|
Bideau/SmartForrest
|
RaspberryPi/dataBase/mysql/CreateMysqlTable.py
|
Python
|
mit
| 6,863
| 0.006994
|
#!/usr/bin/python
# -*- coding: utf-8 -*
"""
The MIT License (MIT)
Copyright (c) 2015 Christophe Aubert
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__author__ = "Christophe Aubert"
__version__ = "1.0"
import SqlCommand
class CreateMysqlTable(SqlCommand.SqlCommand):
def __init__(self):
self.sqlCommand = []
def createTable(self):
# Table: sensorType
#------------------------------------------------------------
sensorType = "CREATE TABLE sensorType( "\
"st_id INT (11) Auto_increment NOT NULL ,"\
"st_type VARCHAR (50) NOT NULL ,"\
"PRIMARY KEY (st_id )"\
")ENGINE=InnoDB;"
self.sqlCommand.append(sensorType)
#------------------------------------------------------------
# Table: measure
#------------------------------------------------------------
measure = "CREATE TABLE measure( "\
"m_id INT (11) Auto_increment NOT NULL ,"\
"m_date INT NOT NULL ,"\
"m_value FLOAT NOT NULL ,"\
"s_id INT NOT NULL ,"\
"PRIMARY KEY (m_id )"\
")ENGINE=InnoDB;"
self.sqlCommand.append(measure)
#------------------------------------------------------------
# Table: sensor
#------------------------------------------------------------
sensor = "CREATE TABLE sensor( "\
"s_id INT (11) Auto_increment NOT NULL , "\
"st_id INT NOT NULL , "\
"sta_id INT NOT NULL , "\
"PRIMARY KEY (s_id ) "\
")ENGINE=InnoDB;"
self.sqlCommand.append(sensor)
#------------------------------------------------------------
# Table: station
#------------------------------------------------------------
station = "CREATE TABLE station( "\
"sta_id INT (11) Auto_increment NOT NULL , "\
"sta_name VARCHAR (50) NOT NULL , "\
"sta_longitude FLOAT NOT NULL , "\
"sta_latitude FLOAT NOT NULL, "\
"sta_installDate INT NOT NULL, "\
"PRIMARY KEY (sta_id ) "\
")ENGINE=InnoDB;"
self.sqlCommand.append(station)
#------------------------------------------------------------
# Table: user
#------------------------------------------------------------
user = "CREATE TABLE user ( "\
"u_id INT (11) Auto_increment NOT NULL,"\
"u_lastName VARCHAR(30) NOT NULL,"\
"u_firstName VARCHAR(30) NOT NULL,"\
"u_description VARCHAR(200) NOT NULL,"\
"PRIMARY KEY (u_id)"\
")ENGINE=InnoDB;"
self.sqlCommand.append(user)
#------------------------------------------------------------
# Table: connection
#------------------------------------------------------------
connection = "CREATE TABLE connection ( "\
"c_id INT (11) Auto_increment NOT NULL,"\
"u_id INT NOT NULL,"\
"c_login VARCHAR(30) NOT
|
NULL,"\
"c_password VARCHAR (50) NOT NULL ,"\
"c_adminKey BOOLEAN DEFAULT
|
NULL,"\
"PRIMARY KEY(c_id)"\
")ENGINE=InnoDB;"
self.sqlCommand.append(connection)
stationAccess = "CREATE TABLE stationAccess ( "\
"staa_id INT (11) Auto_increment NOT NULL,"\
"u_id INT NOT NULL ,"\
"sta_id INT NOT NULL ,"\
"PRIMARY KEY(staa_id)"\
")ENGINE=InnoDB;"
self.sqlCommand.append(stationAccess)
#------------------------------------------------------------
# ALTER TABLE
#------------------------------------------------------------
atMeasure = "ALTER TABLE measure ADD CONSTRAINT FK_measure_s_id "\
"FOREIGN KEY (s_id) REFERENCES sensor(s_id);"
self.sqlCommand.append(atMeasure)
atsensor = "ALTER TABLE sensor ADD CONSTRAINT FK_sensor_st_id "\
"FOREIGN KEY (st_id) REFERENCES sensorType(st_id);"
self.sqlCommand.append(atsensor)
atsensor2 = "ALTER TABLE sensor ADD CONSTRAINT FK_sensor_sta_id "\
"FOREIGN KEY (sta_id) REFERENCES station(sta_id);"
self.sqlCommand.append(atsensor2)
atConnection = "ALTER TABLE connection ADD CONSTRAINT FK_connection_u_id "\
"FOREIGN KEY (u_id) REFERENCES user(u_id)"
self.sqlCommand.append(atConnection)
atstationAccess = "ALTER TABLE stationAccess ADD CONSTRAINT FK_stationAccess_u_id "\
"FOREIGN KEY (u_id) REFERENCES user(u_id)"
self.sqlCommand.append(atstationAccess)
atstationAccess2 = "ALTER TABLE stationAccess ADD CONSTRAINT FK_stationAccess_sta_id "\
"FOREIGN KEY (sta_id) REFERENCES station(sta_id)"
self.sqlCommand.append(atstationAccess2)
def getSQL(self):
return self.sqlCommand
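# A minimal usage sketch (cursor is a hypothetical MySQLdb/PyMySQL cursor;
# executing the generated statements is left to the caller):
#
#     builder = CreateMysqlTable()
#     builder.createTable()
#     for command in builder.getSQL():
#         cursor.execute(command)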
|
vim-scripts/TeX-9
|
ftplugin/tex_nine/tex_nine_utils.py
|
Python
|
gpl-3.0
| 3,875
| 0.005935
|
# -*- coding: utf-8 -*-
#************************************************************************
#
# TeX-9 library: Python module
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Elias Toivanen, 2011-2014
#
#************************************************************************
import re
import vim
import sys
# Utility functions
def echoerr(errorstr):
sys.stderr.write("TeX-9: {0}\n".format(str(errorstr)))
def echomsg(msgstr):
sys.stdout.write("TeX-9: {0}\n".format(str(msgstr)))
def get_latex_environment(vim_window):
"""Get information about the current LaTeX environment.
Returns a dictionary with keys
'environment': the name of the current LaTeX environment
'range': 2-tuple of the beginning and ending line numbers
"""
pat = re.compile(r'^\s*\\(begin|end){([^}]+)}')
b = list(vim_window.buffer)
row = vim_window.cursor[0] - 1
environment = ""
begin = end = 0
current_line = b[row]
head = b[row - 1::-1] # From line above to the start
tail = b[row + 1:] # From next line to the end
c = pat.match(current_line)
if c:
environment = c.group(2)
if c.group(1) == 'end':
end = row + 1
elif c.group(1) == 'begin':
begin = row + 1
if not begin:
envs = {}
for i, line in enumerate(head):
m = pat.match(line)
if m:
                e = m.group(2)
envs[m.groups()] = i
if ('begin', e) in envs and ('end', e) in envs and envs[('end', e)] < envs[('begin', e)]:
# Eliminate nested environments
del envs[('begin', e)]
del envs[('end', e)]
elif ('end', e) not in envs:
begin = row - i
environment = e
break
if not end:
envs = {}
for i, line in enumerate(tail):
m = pat.match(line)
if m:
envs[m.groups()] = i
e = m.group(2)
if ('begin', e) in envs and ('end', e) in envs:
#and envs[('end', e)] > envs[('begin', e)]:
# Eliminate nested environments
del envs[('begin', e)]
del envs[('end', e)]
elif m.groups() == ('end', environment):
end = row + i + 2
break
return {'environment': environment, 'range': (begin, end)}
def is_latex_math_environment(vim_window,
environments = re.compile(r"matrix|cases|math|equation|align|array")):
"""Returns True if the cursor is currently on a maths environment."""
e = get_latex_environment(vim_window)
return bool(environments.search(e['environment']))
def find_compiler(vimbuffer, nlines=10):
"""Finds the compiler from the header."""
lines = "\n".join(vimbuffer[:nlines])
if lines:
c = re.search("^%\s*Compiler:\s*(\S+)", lines, re.M)
if c:
return c.group(1).strip()
else:
return ""
else:
#Cannot determine the compiler
return ""
class TeXNineError(Exception):
pass
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_htmlparser.py
|
Python
|
mit
| 30,373
| 0.001152
|
"""Tests for HTMLParser.py."""
import html.parser
import pprint
import unittest
from test import support
class EventCollector(html.parser.HTMLParser):
def __init__(self, *args, **kw):
self.events = []
self.append = self.events.append
html.parser.HTMLParser.__init__(self, *args, **kw)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class TestCaseBase(unittest.TestCase):
def get_collector(self):
raise NotImplementedError
def _run_check(self, source, expected_events, collector=None):
if collector is None:
collector = self.get_collector()
parser = collector
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
            self.fail("received events did not match expected events\n"
"Expected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events, EventCollectorExtra())
def _parse_error(self, source):
def parse(source=source):
parser = self.get_collector()
parser.feed(source)
parser.close()
self.assertRaises(html.parser.HTMLParseError, parse)
class HTMLParserStrictTestCase(TestCaseBase):
def get_collector(self):
        with support.check_warnings(("", DeprecationWarning), quiet=False):
return EventCollector(strict=True)
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity;&#32;
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
&#x201C;
<!--comment2a-- --comment2b-->
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_malformatted_charref(self):
self._run_check("<p>&#bad;</p>", [
("starttag", "p", []),
("data", "&#bad;"),
("endtag", "p"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
        # lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html
dtds = ['HTML', # HTML5 doctype
('HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
'"http://www.w3.org/TR/html4/strict.dtd"'),
('HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" '
'"http://www.w3.org/TR/html4/loose.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"'),
('html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd"'),
('math PUBLIC "-//W3C//DTD MathML 2.0//EN" '
'"http://www.w3.org/Math/DTD/mathml2/mathml2.dtd"'),
('html
|
dnxbjyj/python-basic
|
gui/wxpython/wxPython-demo-4.0.1/demo/PopupMenu.py
|
Python
|
mit
| 4,928
| 0.004261
|
#!/usr/bin/env python
import wx
import images
#----------------------------------------------------------------------
text = """\
Right-click on any bare area of this panel (or Ctrl-click on Macs
if you don't have a multi-button mouse) to show a popup menu.
Then look at the code for this sample. Notice how the PopupMenu
method is similar to the ShowModal method of a wx.Dialog in that
it doesn't return until the popup menu has been dismissed. The
event handlers for the popup menu items can either be attached to
the menu itself, or to the window that invokes PopupMenu.
"""
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
box = wx.BoxSizer(wx.VERTICAL)
# Make and layout the controls
fs = self.GetFont().GetPointSize()
bf = wx.Font(fs+4, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
nf = wx.Font(fs+2, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
t = wx.StaticText(self, -1, "PopupMenu")
t.SetFont(bf)
box.Add(t, 0, wx.CENTER|wx.ALL, 5)
box.Add(wx.StaticLine(self, -1), 0, wx.EXPAND)
box.Add((10,20))
t = wx.StaticText(self, -1, text)
t.SetFont(nf)
box.Add(t, 0, wx.CENTER|wx.ALL, 5)
t.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
self.SetSizer(box)
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
def OnContextMenu(self, event):
self.log.WriteText("OnContextMenu\n")
# only do this part the first time so the events are only bound once
#
        # Yet another alternate way to do IDs. Some prefer them up top to
        # avoid clutter, some prefer them close to the object of interest
# for clarity.
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.popupID3 = wx.NewId()
self.popupID4 = wx.NewId()
self.popupID5 = wx.NewId()
self.popupID6 = wx.NewId()
self.popupID7 = wx.NewId()
            self.popupID8 = wx.NewId()
self.popupID9 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnPopupOne, id=self.popupID1)
self.Bind(wx.EVT_MENU, self.OnPopupTwo, id=self.popupID2)
self.Bind(wx.EVT_MENU, self.OnPopupThree, id=self.popupID3)
self.Bind(wx.EVT_MENU, self.OnPopupFour, id=self.popupID4)
self.Bind(wx.EVT_MENU, self.OnPopupFive, id=self.popupID5)
self.Bind(wx.EVT_MENU, self.OnPopupSix, id=self.popupID6)
self.Bind(wx.EVT_MENU, self.OnPopupSeven, id=self.popupID7)
self.Bind(wx.EVT_MENU, self.OnPopupEight, id=self.popupID8)
self.Bind(wx.EVT_MENU, self.OnPopupNine, id=self.popupID9)
# make a menu
menu = wx.Menu()
# Show how to put an icon in the menu
item = wx.MenuItem(menu, self.popupID1,"One")
bmp = images.Smiles.GetBitmap()
item.SetBitmap(bmp)
menu.Append(item)
# add some other items
menu.Append(self.popupID2, "Two")
menu.Append(self.popupID3, "Three")
menu.Append(self.popupID4, "Four")
menu.Append(self.popupID5, "Five")
menu.Append(self.popupID6, "Six")
# make a submenu
sm = wx.Menu()
sm.Append(self.popupID8, "sub item 1")
        sm.Append(self.popupID9, "sub item 2")
menu.Append(self.popupID7, "Test Submenu", sm)
# Popup the menu. If an item is selected then its handler
# will be called before PopupMenu returns.
self.PopupMenu(menu)
menu.Destroy()
def OnPopupOne(self, event):
self.log.WriteText("Popup one\n")
def OnPopupTwo(self, event):
self.log.WriteText("Popup two\n")
def OnPopupThree(self, event):
self.log.WriteText("Popup three\n")
def OnPopupFour(self, event):
self.log.WriteText("Popup four\n")
def OnPopupFive(self, event):
self.log.WriteText("Popup five\n")
def OnPopupSix(self, event):
self.log.WriteText("Popup six\n")
def OnPopupSeven(self, event):
self.log.WriteText("Popup seven\n")
def OnPopupEight(self, event):
self.log.WriteText("Popup eight\n")
def OnPopupNine(self, event):
self.log.WriteText("Popup nine\n")
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>PopupMenu</center></h2>
""" + text + """
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
TheWardoctor/Wardoctors-repo
|
script.module.fantastic/lib/resources/lib/sources/en/sceper.py
|
Python
|
apache-2.0
| 6,854
| 0.018675
|
# -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['sceper.ws','sceper.unblocked.pro']
self.base_link = 'https://sceper.unblocked.pro'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
posts = client.parseDOM(r, 'item')
hostDict = hostprDict
items = []
for post in posts:
try:
t = client.parseDOM(post, 'title')[0]
c = client.parseDOM(post, 'content.+?')[0]
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
s = s[0] if s else '0'
u = zip(client.parseDOM(c, 'a', ret='href'), client.parseDOM(c, 'a'))
u = [(i[0], i[1], re.findall('PT(\d+)$', i[1])) for i in u]
u = [(i[0], i[1]) for i in u if not i[2]]
if 'tvshowtitle' in data:
u = [([x for x in i[0].strip('//').split('/')][-1], i[0]) for i in u]
else:
u = [(t, i[0], s) for i in u]
items += u
except:
pass
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check: sources = check
return sources
except:
return sources
def resolve(self, url):
return url
|
stxnext/intranet-open
|
src/intranet3/intranet3/views/times/tickets.py
|
Python
|
mit
| 4,991
| 0.004809
|
from __future__ import with_statement
from pyramid.view import view_config
from pyramid.renderers import render
from intranet3.utils.views import BaseView
from intranet3.models import (
User,
TimeEntry,
Tracker,
Project,
Client,
DBSession,
)
from intranet3.forms.times import ProjectsTimeForm, TimeEntryForm
from intranet3.log import INFO_LOG, WARN_LOG, ERROR_LOG, DEBUG_LOG, EXCEPTION_LOG
from intranet3.lib.times import TimesReportMixin, HTMLRow, dump_entries_to_excel
LOG = INFO_LOG(__name__)
WARN = WARN_LOG(__name__)
ERROR = ERROR_LOG(__name__)
DEBUG = DEBUG_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
MAX_TIMEOUT = 20 # DON'T WAIT LONGER THAN DEFINED TIMEOUT
MAX_TICKETS_PER_REQUEST = 50 # max number of ticket ids to include in a single request to tracker
@view_config(route_name='times_tickets_excel', permission='can_view_time_report')
class Excel(BaseView):
def get(self):
client = self.request.user.get_client()
form = ProjectsTimeForm(formdata=self.request.GET, client=client)
if not form.validate():
return render('time/tickets_report/projects_report.html', dict(form=form))
query = DBSession.query
start_date, end_date = form.date_range.data
projects = form.projects.data
users = form.users.data
ticket_choice = form.ticket_choice.data
group_by = (
form.group_by_client.data,
form.group_by_project.data,
form.group_by_bugs.data,
form.group_by_user.data
)
bigger_than = form.bigger_than.data
LOG(u'Tickets report %r - %r - %r' % (start_date, end_date, projects))
uber_query = query(Client, Project, TimeEntry.ticket_id, User, Tracker, TimeEntry.description, TimeEntry.date, TimeEntry.time)
uber_query = uber_query.filter(TimeEntry.user_id==User.id)\
.filter(TimeEntry.project_id==Project.id)\
.filter(Project.tracker_id==Tracker.id)\
.filter(Project.client_id==Client.id)
if projects:
uber_query = uber_query.filter(TimeEntry.project_id.in_(projects))
uber_query = uber_query.filter(TimeEntry.date>=start_date)\
.filter(TimeEntry.date<=end_date)\
.filter(TimeEntry.deleted==False)
if ticket_choice == 'without_bug_only':
uber_query = uber_query.filter(TimeEntry.ticket_id=='')
elif ticket_choice == 'meetings_only':
meeting_ids = [t['value'] for t in TimeEntryForm.PREDEFINED_TICKET_IDS]
uber_query = uber_query.filter(TimeEntry.ticket_id.in_(meeting_ids))
if users:
uber_query = uber_query.filter(User.id.in_(users))
uber_query = uber_query.order_by(Client.name, Project.name, TimeEntry.ticket_id, User.name)
entries = uber_query.all()
file, response = dump_entries_to_excel(entries, group_by, bigger_than)
return response
@view_config(route_name='times_tickets_report', permission='can_view_time_report')
class Report(TimesReportMixin, BaseView):
def dispatch(self):
        client = self.request.user.get_client()
form = ProjectsTimeForm(self.request.GET, client=client)
if not self.request.GET or not form.validate():
return dict(form=form)
start_date, end_date = form.date_range.data
projects = form.projects.data
if not projects:
projects = [p[0] for p in form.projects.choices]
bug_id = self.request.GET.get('bug_id')
users = form.users.data
bigger_than = form.bigger_than.data
ticket_choice = form.ticket_choice.data
group_by = (
form.group_by_client.data,
form.group_by_project.data,
form.group_by_bugs.data,
form.group_by_user.data
)
LOG(u'Tickets report %r - %r - %r' % (start_date, end_date, projects))
uber_query = self._prepare_uber_query(
start_date, end_date, projects, users, ticket_choice, bug_id
)
entries = uber_query.all()
participation_of_workers = self._get_participation_of_workers(entries)
tickets_id = ','.join([str(e[2]) for e in entries])
trackers_id = ','.join([str(e[4].id) for e in entries])
rows, entries_sum = HTMLRow.from_ordered_data(entries, group_by, bigger_than)
return dict(
rows=rows,
entries_sum=entries_sum,
form=form,
participation_of_workers=participation_of_workers,
participation_of_workers_sum=sum([time[1] for time in participation_of_workers]),
trackers_id=trackers_id, tickets_id=tickets_id,
str_date=self._sprint_daterange(start_date, end_date),
)
def _sprint_daterange(self, st, end):
return '%s - %s' % (st.strftime('%d-%m-%Y'), end.strftime('%d-%m-%Y'))
|
geomf/omf-fork
|
omf/hdfs.py
|
Python
|
gpl-2.0
| 7,235
| 0.003179
|
# Open Modeling Framework (OMF) Software for simulating power systems behavior
# Copyright (c) 2015, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import pyhdfs
import shutil
import vcap_parser
import os
from os.path import join as pJoin
from models.__metaModel__ import _omfDir
class Hdfs(object):
HOME_DIR = '/user/omf/' # + vcap_parser.get_space_name() + '/'
populated = False
def __init__(self):
self.hdfs = pyhdfs.HdfsClient(self.__get_all_namenodes())
def __get_all_namenodes(self):
namenodes = []
credentials = vcap_parser.get_service_credentials('hdfs')
cluster_config = credentials['HADOOP_CONFIG_KEY']
names = cluster_config['dfs.ha.namenodes.nameservice1'].split(',')
for name in names:
namenodes.append(cluster_config['dfs.namenode.http-address.nameservice1.' + name])
return namenodes
def create_dir(self, path):
print 'HDFS: Creating directory', path
self.hdfs.mkdirs(Hdfs.HOME_DIR + path)
def listdir(self, directory):
return self.hdfs.listdir(Hdfs.HOME_DIR + directory)
def is_dir(self, directory):
print self.hdfs.get_file_status(Hdfs.HOME_DIR + directory).type
return self.hdfs.get_file_status(Hdfs.HOME_DIR + directory).type == "DIRECTORY"
def remove(self, path):
try:
self.hdfs.delete(Hdfs.HOME_DIR + path, recursive=True)
except pyhdfs.HdfsPathIsNotEmptyDirectoryException:
self.hdfs.delete(Hdfs.HOME_DIR + path + "/*")
self.hdfs.delete(Hdfs.HOME_DIR + path)
def stat(self, path):
status = self.hdfs.get_file_status(Hdfs.HOME_DIR + path)
return status
def get_file_modification_time(self, path):
return self.hdfs.get_file_status(Hdfs.HOME_DIR + path).modificationTime / 1000
def exists(self, path):
return self.hdfs.exists(Hdfs.HOME_DIR + path)
def open(self, path):
        f = self.hdfs.open(Hdfs.HOME_DIR + path)
print "Opening file: " + path + ". Type is: " + str(type(f))
return f
def save(self, path, content):
try:
self.hdfs.create(Hdfs.HOME_DIR + path, content)
except pyhdfs.HdfsFileAlreadyExistsException:
self.hdfs.delete(Hdfs.HOME_DIR + path)
self.hdfs.create(Hdfs.HOME_DIR + path, content)
def walk(self, path):
print "Walk in path: " + path
return self.hdfs.walk(Hdfs.HOME_DIR + path)
def copy_within_fs(self, source, target):
print "HDFS: Copy within fs: copying to local... from " + _omfDir + "/tmp/" + source + " to: " + Hdfs.HOME_DIR + target
if not os.path.exists(pJoin(_omfDir, "tmp", source)):
os.makedirs(pJoin(_omfDir, "tmp", source))
self.hdfs.copy_to_local(Hdfs.HOME_DIR + source, pJoin(_omfDir, "tmp", source))
try:
print "HDFS: Copy within fs: copying from local... from: " + Hdfs.HOME_DIR + target + " to: " + _omfDir + "/tmp/" + source
self.hdfs.copy_from_local(pJoin(_omfDir, "tmp", source), Hdfs.HOME_DIR + target)
except pyhdfs.HdfsFileAlreadyExistsException:
print "HDFS: Copy within fs: file existed before :("
self.hdfs.delete(Hdfs.HOME_DIR + target)
self.hdfs.copy_from_local(pJoin(_omfDir, "tmp", source), Hdfs.HOME_DIR + target)
def export_to_hdfs(self, directory, file_to_export):
print 'HDFS: Copying file from local filesystem at ' + file_to_export.filename + ' to HDFS at ' + Hdfs.HOME_DIR + file_to_export.filename
self.hdfs.copy_from_local(file_to_export.filename, pJoin(Hdfs.HOME_DIR, directory, file_to_export.filename),
overwrite=True)
return True
def export_local_to_hdfs(self, directory, file_to_export):
filename = file_to_export.split("/")[-1]
print 'HDFS: Copying file from local filesystem at ' + file_to_export + ' to HDFS at ' + Hdfs.HOME_DIR + directory + "/" + filename
self.hdfs.copy_from_local(file_to_export, pJoin(Hdfs.HOME_DIR, directory, filename), overwrite=True)
return True
def export_from_fs_to_local(self, source, target):
directory = os.path.split(target)[0]
if not os.path.isdir(directory):
os.makedirs(directory)
self.hdfs.copy_to_local(Hdfs.HOME_DIR + source, pJoin(_omfDir, target))
def import_files_to_hdfs(self, local_directory, hdfs_directory):
print "Exporting files from local directory: " + local_directory + " to hdfs directory: " + hdfs_directory
self.create_dir(hdfs_directory)
for f in os.listdir(local_directory):
self.export_local_to_hdfs(hdfs_directory, pJoin(local_directory, f))
return True
def recursive_import_to_hdfs(self, start_dir):
self.create_dir(start_dir)
for f in os.listdir(pJoin(_omfDir, start_dir)):
if os.path.isdir(pJoin(_omfDir, start_dir, f)):
self.create_dir(pJoin(start_dir, f))
self.recursive_import_to_hdfs(pJoin(start_dir, f))
else:
self.export_local_to_hdfs(start_dir, pJoin(_omfDir, start_dir, f))
return True
def populate_hdfs(self):
template_files = []
model_files = []
try:
template_files = ["templates/" + x for x in self.listdir("templates")]
except:
print "importing templates to hdfs"
if self.import_files_to_hdfs("templates", "templates"):
template_files = ["templates/" + x for x in self.listdir("templates")]
shutil.rmtree("templates")
try:
model_files = ["models/" + x for x in self.listdir("models") if not (x.endswith('.pyc') or x.endswith('.py'))]
except:
print "importing models to hdfs"
if self.import_files_to_hdfs("models", "models"):
model_files = ["models/" + x for x in self.listdir("models")]
shutil.rmtree("models")
try:
if not self.exists("data"):
self.recursive_import_to_hdfs("data")
except Exception as e:
print "Could not import data.... Reason: " + str(e)
try:
if not self.exists("static"):
self.recursive_import_to_hdfs("static")
except Exception as e:
print "Could not import data.... Reason: " + str(e)
self.populated = True
return template_files, model_files
def populate_local(self):
if not os.path.exists("data"):
try:
self.export_from_fs_to_local("data", "data")
except Exception as e:
print "Could not import data.... Reason: " + str(e)
else:
print "Data directory already exists."
|
linfanangel/Trality
|
cart/cartapp/permission.py
|
Python
|
gpl-3.0
| 268
| 0.003731
|
from rest_framework import permissions
class IsReadOnly(permissions.BasePermission):
    def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
        return obj.owner == request.user
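# A minimal usage sketch (hypothetical Article model and serializer; assumes
# the model exposes an ``owner`` field, as the check above implies):
#
#     from rest_framework import viewsets
#
#     class ArticleViewSet(viewsets.ModelViewSet):
#         queryset = Article.objects.all()
#         serializer_class = ArticleSerializer
#         permission_classes = [IsReadOnly]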
|
PrairieLearn/PrairieLearn
|
exampleCourse/questions/workshop/Lesson1_example3_v3/server.py
|
Python
|
agpl-3.0
| 1,005
| 0.01393
|
import random
def generate(data):
ask = ['equivalent resistance $R_T$', 'current from the power supply $I_T$']
which = random.choice([0,1])
    data['params']['ask'] = ask[which]
label = ["$R_T$", "$I_T$"]
data['params']['lab'] = label[which]
    unit = ["$\\Omega$", "A"]
data['params']['unit'] = unit[which]
Vt = random.randint(100,200)
data['params']['Vt'] = Vt
R1 = random.choice(list(range(20,180,10)))
data['params']['R1'] = R1
R2 = random.choice(list(range(20,180,20)))
data['params']['R2'] = R2
R3 = random.choice(list(range(20,100,5)))
data['params']['R3'] = R3
figname = ["circ1.png", "circ2.png"]
whichfig = random.choice([0,1])
data['params']['figname'] = figname[whichfig]
if whichfig: # this is the series
Rt = R1 + R2 + R3
else: # this is the parallel
Rtinv = 1/R1 + 1/R2 + 1/R3
Rt = 1/Rtinv
It = Vt/Rt
ans = [Rt, It]
data['correct_answers']['ans'] = ans[which]
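# A worked example (illustrative values): with Vt = 120 and R1 = R2 = R3 = 30,
# the series circuit gives Rt = 30 + 30 + 30 = 90 Ohm and It = 120/90 = 1.33 A;
# the parallel circuit gives 1/Rt = 1/30 + 1/30 + 1/30, so Rt = 10 Ohm and
# It = 120/10 = 12 A.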
|
zqfan/leetcode
|
algorithms/437. Path Sum III/solution.py
|
Python
|
gpl-3.0
| 748
| 0
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: int
"""
if not root:
return 0
left = self.pathSum(root.left, sum)
right = self.pathSum(root.right, sum)
return self.rootSum(root, sum) + left + right
def rootSum(self, root, sum):
if not root:
return 0
left = self.rootSum(root.left, sum - root.val)
right = self.rootSum(root.right, sum - root.val)
        return (root.val == sum) + left + right
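# A worked example (from the LeetCode 437 problem statement): for the tree
# [10,5,-3,3,2,null,11,3,-2,null,1] and sum = 8, pathSum returns 3
# (the paths are 5->3, 5->2->1 and -3->11).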
|
jumpserver/jumpserver
|
apps/tickets/serializers/ticket/meta/ticket_type/login_asset_confirm.py
|
Python
|
gpl-3.0
| 591
| 0.003431
|
from rest_framework import serializers
from django.utils.translation import ugettext_lazy as _
__all__ = [
'ApplySerializer', 'LoginAssetConfirmSerializer',
]
class ApplySerializer(serializers.Serializer):
    # Application information
    apply_login_user = serializers.CharField(required=True, label=_('Login user'))
apply_login_asset = serializers.CharField(required=True, label=_('Login asset'))
    apply_login_system_user = serializers.CharField(
required=True, max_length=64, label=_('Login system user')
)
class LoginAssetConfirmSerializer(ApplySerializer):
pass
|
celebdor/kuryr-libnetwork
|
kuryr_libnetwork/schemata/request_pool.py
|
Python
|
apache-2.0
| 2,102
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr_libnetwork.schemata import commons
REQUEST_POOL_SCHEMA = {
u'links': [{
u'method': u'POST',
u'href': u'/IpamDriver.RequestPool',
u'description': u'Allocate pool of ip addresses',
u'rel': u'self',
u'title': u'Create'
}],
u'title': u'Create pool',
u'required': [u'AddressSpace', u'Pool', u'SubPool', u'V6'],
u'definitions': {u'commons': {}},
u'$schema': u'http://json-schema.org/draft-04/hyper-schema',
u'type': u'object',
u'properties': {
u'AddressSpace': {
            u'description': u'The name of the address space.',
u'type': u'string',
u'example': u'foo',
},
u'Pool': {
u'description': u'A range of IP Addresses represented in '
u'CIDR format address/mask.',
u'$ref': u'#/definitions/commons/definitions/cidr'
},
u'SubPool': {
            u'description': u'A subset of IP range from Pool in '
                            u'CIDR format address/mask.',
            u'$ref': u'#/definitions/commons/definitions/cidr'
},
u'Options': {
u'type': [u'object', u'null'],
u'description': u'Options',
u'example': {},
},
u'V6': {
u'description': u'If set to "True", requesting IPv6 pool and '
u'vice-versa.',
u'type': u'boolean',
u'example': False
}
}
}
REQUEST_POOL_SCHEMA[u'definitions'][u'commons'] = commons.COMMONS
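# A hedged usage sketch (not from this repo): validating a request body with
# the "jsonschema" package, assuming commons.COMMONS resolves the cidr $refs.
#
# import jsonschema
# body = {u'AddressSpace': u'foo', u'Pool': u'10.0.0.0/16',
#         u'SubPool': u'10.0.1.0/24', u'V6': False}
# jsonschema.validate(body, REQUEST_POOL_SCHEMA)  # raises ValidationError if invalid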
|
inventree/InvenTree
|
InvenTree/order/migrations/0042_auto_20210310_1619.py
|
Python
|
mit
| 972
| 0.002058
|
# Generated by Django 3.0.7 on 2021-03-10 05:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0005_owner_model'),
('order', '0041_auto_20210114_1728'),
]
operations = [
migrations.AddField(
model_name='purchaseorder',
name='responsible',
field=models.ForeignKey(blank=True, help_text='User or group responsible for this order', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='users.Owner', verbose_name='Responsible'),
),
migrations.AddField(
model_name='salesorder',
name='responsible',
            field=models.ForeignKey(blank=True, help_text='User or group responsible for this order', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='users.Owner', verbose_name='Responsible'),
),
]
|
suutari-ai/shoop
|
shuup/addons/admin_module/views/reload.py
|
Python
|
agpl-3.0
| 3,004
| 0.001664
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import time
from django import forms
from django.conf import settings
from django.core.management import call_command
from django.http.response import HttpResponse, JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from six import StringIO
from shuup.addons.manager import get_enabled_addons
from shuup.addons.reloader import get_reload_method_classes
from shuup.apps.settings import reload_apps
from shuup.utils.excs import Problem
from shuup.utils.iterables import first
class ReloadMethodForm(forms.Form):
def get_viable_reload_methods(self):
for klass in get_reload_method_classes():
rm = klass()
if rm.is_viable():
yield rm
def __init__(self, **kwargs):
super(ReloadMethodForm, self).__init__(**kwargs)
self.reload_methods = list(self.get_viable_reload_methods())
if not self.reload_methods:
raise Problem(_("There are no viable reload methods available. Please contact your system administrator."))
self.fields["reload_method"] = forms.ChoiceField(
choices=[(rm.identifier, rm.title) for rm in self.reload_methods],
            label=_("Reload Method"),
initial=self.reload_methods[0].identifier,
widget=forms.RadioSelect
)
def get_selected_reload_method(self):
return first(rm for rm in self.reload_methods if rm.identifier == self.cleaned_data["reload_method"])
def finalize_installation_for_enabled_apps():
out = StringIO()
enabled_addons = get_enabled_addons(settings.SHUUP_ENABLED_ADDONS_FILE)
new_apps = [app for app in enabled_addons if app not in settings.INSTALLED_APPS]
if new_apps:
out.write("Enabling new addons: %s" % new_apps)
settings.INSTALLED_APPS += type(settings.INSTALLED_APPS)(new_apps)
reload_apps()
call_command("migrate", "--noinput", "--no-color", stdout=out)
call_command("collectstatic", "--noinput", "--no-color", stdout=out)
return out.getvalue()
class ReloadView(FormView):
template_name = "shuup/admin/addons/reload.jinja"
form_class = ReloadMethodForm
def form_valid(self, form):
reloader = form.get_selected_reload_method()
reloader.execute()
return HttpResponse(_("Reloading.")) # This might not reach the user...
def get(self, request, *args, **kwargs):
if request.GET.get("ping"):
return JsonResponse({"pong": time.time()})
elif request.GET.get("finalize"):
return JsonResponse({"message": finalize_installation_for_enabled_apps()})
return super(ReloadView, self).get(request, *args, **kwargs)
|
erickdom/restAndroid
|
transactions/migrations/0002_transaction_response.py
|
Python
|
apache-2.0
| 436
| 0.002294
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transactions', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='response',
field=models.CharField(default=b'', max_length=4, null=True, blank=True),
),
]
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.7/Lib/doctest.py
|
Python
|
mit
| 101,750
| 0.001179
|
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
return open(filename).read(), filename
# Use sys.stdout encoding for output.
_encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8'
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in `s`, and return the result.
If the string `s` is Unicode, it is encoded using the stdout
encoding and the `backslashreplace` error handler.
"""
if isinstance(s, unicode):
s = s.encode(_encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
    Return a string containing a traceback message for the given
    exc_info tuple (as returned by sys.exc_info()).
"""
    # Get a traceback message.
|
phw/weblate
|
weblate/formats/tests/test_convert.py
|
Python
|
gpl-3.0
| 4,423
| 0.000452
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""File format specific behavior."""
from weblate.formats.convert import (
HTMLFormat,
IDMLFormat,
OpenDocumentFormat,
PlainTextFormat,
WindowsRCFormat,
)
from weblate.formats.helpers import BytesIOMode
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.trans.tests.utils import get_test_file
IDML_FILE = get_test_file("en.idml")
HTML_FILE = get_test_file("cs.html")
OPENDOCUMENT_FILE = get_test_file("cs.odt")
TEST_RC = get_test_file("cs-CZ.rc")
TEST_TXT = get_test_file("cs.txt")
class ConvertFormatTest(AutoFormatTest):
NEW_UNIT_MATCH = None
EXPECTED_FLAGS = ""
def parse_file(self, filename):
return self.FORMAT(filename, template_store=self.FORMAT(filename))
class HTMLFormatTest(ConvertFormatTest):
FORMAT = HTMLFormat
FILE = HTML_FILE
MIME = "text/html"
EXT = "html"
COUNT = 5
MASK = "*/translations.html"
EXPECTED_PATH = "cs_CZ/translations.html"
FIND_CONTEXT = "+html.body.p:5-1"
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"<body>"
NEW_UNIT_MATCH = None
BASE = HTML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
class OpenDocumentFormatTest(ConvertFormatTest):
FORMAT = OpenDocumentFormat
FILE = OPENDOCUMENT_FILE
MIME = "application/vnd.oasis.opendocument.text"
EXT = "odt"
COUNT = 4
MASK = "*/translations.odt"
EXPECTED_PATH = "cs_CZ/translations.odt"
FIND_CONTEXT = (
"odf///office:document-content[0]/office:body[0]/office:text[0]/text:p[1]"
)
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = OPENDOCUMENT_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
OpenDocumentFormat.convertfile(BytesIOMode("test.odt", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class IDMLFormatTest(ConvertFormatTest):
FORMAT = IDMLFormat
FILE = IDML_FILE
MIME = "application/octet-stream"
EXT = "idml"
COUNT = 6
MASK = "*/translations.idml"
EXPECTED_PATH = "cs_CZ/translations.idml"
FIND_CONTEXT = (
"idPkg:Story[0]/{}Story[0]/{}XMLElement[0]/{}ParagraphStyleRange[0]"
"Stories/Story_mainmainmainmainmainmainmainmainmainmainmainu188.xml"
)
FIND_MATCH = """<g id="0"><g id="1">THE HEADLINE HERE</g></g>"""
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = IDML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
IDMLFormat.convertfile(BytesIOMode("test.idml", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class WindowsRCFormatTest(ConvertFormatTest):
FORMAT = WindowsRCFormat
FILE = TEST_RC
BASE = TEST_RC
MIME = "text/plain"
EXT = "rc"
COUNT = 5
MASK = "rc/*.rc"
EXPECTED_PATH = "rc/cs-CZ.rc"
MATCH = "STRINGTABLE"
FIND_CONTEXT = "STRINGTABLE.IDS_MSG1"
FIND_MATCH = "Hello, world!\n"
EDIT_OFFSET = 1
class PlainTextFormatTest(ConvertFormatTest):
FORMAT = PlainTextFormat
FILE = TEST_TXT
BASE = TEST_TXT
MIME = "text/plain"
EXT = "txt"
COUNT = 5
MASK = "txt/*.txt"
EXPECTED_PATH = "txt/cs_CZ.txt"
MATCH = "Hello"
FIND_CONTEXT = "cs.txt:2"
FIND_MATCH = "Hello, world!"
EDIT_OFFSET = 1
|
trombastic/PyScada
|
pyscada/modbus/migrations/0004_auto_20160115_0920.py
|
Python
|
gpl-3.0
| 454
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pyscada', '0010_auto_20160115_0918'),
('modbus', '0003_auto_20160115_0918'),
]
operations = [
migrations.RenameField(
model_name='modbusdevice',
old_name='modbus_client',
new_name='modbus_device',
),
]
|
savoirfairelinux/secure-odoo
|
action_access_control_list/models/ir_rule.py
|
Python
|
lgpl-3.0
| 486
| 0
|
# -*- coding: utf-8 -*-
# © 2017 Savoir-faire Linux
# License LGPL-3.0 or later (http://www.gnu.org/licenses/gpl).
from odoo import api, models
class IrRule(models.Model):
_inherit = 'ir.rule'
@api.model
def _compute_domain(self, model_name, mode="read"):
if getattr(self.env, '_bypass_access', False):
if self.env._bypass_exception != model_name:
return []
        return super(IrRule, self)._compute_domain(model_name, mode=mode)
|
acevest/acecode
|
learn/python/mul.py
|
Python
|
gpl-2.0
| 462
| 0.008658
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------
# File Name: mul.py
# Author: Zhao Yanbai
# Thu Oct 1 15:10:27 2015
# Description: none
# ------------------------------------------------------------------------
for j in range(1, 10) :
for i in range(1, 10) :
if i>j :
continue
print "{0}*{1}={2:<2d}\t".format(i, j, i*j),
print ""
|
kernsuite-debian/lofar
|
SAS/DataManagement/ResourceTool/resourcetool.py
|
Python
|
gpl-3.0
| 24,976
| 0.006006
|
#!/usr/bin/env python3
# Copyright (C) 2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
"""
resourcetool.py
Simple utility to list or update RADB resource availability values.
Essentially a tool around RADB getResources(), updateResourceAvailability(), getResourceClaims() and (parts of) updateResourceClaims().
Can also figure out available capacity for a mounted storage resource and update it in the RADB (-U/--update-available-storage-capacity option).
Can also update storage claim endtime to its task endtime (if ended) in the RADB (-E/--end-past-tasks-storage-claims option).
Examples (source lofarinit.sh to set LOFARROOT, PYTHONPATH, ...):
- Update available (local) storage capacity and set storage claim endtimes to task endtimes (if ended) for an observation storage node, e.g. via cron in operations:
source /opt/lofar/lofarinit.sh; LOFARENV=PRODUCTION /opt/lofar/bin/resourcetool --broker=scu001.control.lofar --end-past-tasks-storage-claims --update-available-storage-capacity
- Show all DRAGNET resources on the test system RADB:
LOFARENV=TEST resourcetool --broker=scu199.control.lofar --resource-group-root=DRAGNET
- Deactivate 2 storage resources in operations, because disks from both storage areas are found to be faulty (then still need to re-schedule tasks):
LOFARENV=PRODUCTION resourcetool --broker=scu001.control.lofar drg01_storage:/data1=False drg01_storage:/data2=False
NOTES:
! Be careful what system (operations or test) this command applies to! This can be set using the env vars LOFARENV=TEST or LOFARENV=PRODUCTION
Operations vs Test (vs Development) can be seen from the default RADB_BUSNAME in the usage info: lofar.* vs test.lofar.* vs devel.lofar.*
! By default, listed or updateable resources are restricted to resources under the localhost's resource group.
This is on purpose to make -U work correctly. The -G/--resource-group-root option can be used to widen the resource group scope for listing
or explicit command-line updates, but non-default -G with -U is rejected: it is too easy to mass-update other resources with local filesystem info.
"""
import logging
from datetime import datetime, timedelta
from lofar.messaging import DEFAULT_BROKER, DEFAULT_BUSNAME
from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RADBRPC
from lofar.common.util import humanreadablesize
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.WARN)
logger = logging.getLogger(__name__)
def printResources(resources, scaled_units=True):
""" E.g.: resources = [{u'total_capacity': 3774873600, u'name': u'dragproc_bandwidth:/data', u'type_id': 3,
u'available_capacity': 3774873600, u'type_name': u'bandwidth', u'unit_id': 3,
u'active': True, u'used_capacity': 0, u'id': 118, u'unit': u'bits/second',
'claimable_capacity': 3774873600}, ...] # this key was added (not from RADB); it can be negative!
"""
header = {'id': 'RId', 'name': 'Resource Name', 'active': 'Active',
'available_capacity': ' Avail. Capacity', 'claimable_capacity': ' Claimable Cap.',
'total_capacity': ' Total Capacity', 'unit': 'Unit'}
print(('{id:4s} {name:24s} {active:6s} {available_capacity} {claimable_capacity} {total_capacity} {unit}'.format(**header)))
print('===================================================================================================')
resources.sort(key=lambda r: r['id']) # SQL could have done this better
for res in resources:
res['active'] = 'True' if res['active'] else 'False' # to solve bool formatting issue
if scaled_units and (res['type_name'] == 'storage' or res['type_name'] == 'bandwidth'):
unit_base = 1024 if res['type_name'] == 'storage' else 1000 # check type_name instead of unit as in printClaims()
res['available_capacity'] = humanreadablesize(res['available_capacity'], '', unit_base)
res['claimable_capacity'] = humanreadablesize(res['claimable_capacity'], '', unit_base)
res['total_capacity'] = humanreadablesize(res['total_capacity'] , '', unit_base)
cap_conv = '>16s'
else:
cap_conv = '16d'
print((('{id:4d} {name:24s} {active:6s} {available_capacity:' + cap_conv +
'} {claimable_capacity:' + cap_conv + '} {total_capacity:' + cap_conv + '} {unit}').format(**res)))
if not resources:
print('<no resources>')
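# Illustrative call with invented values (humanreadablesize is imported at the
# top of this file; 'storage' capacities are scaled with unit_base 1024):
#
# printResources([{'id': 1, 'name': 'node01_storage:/data', 'active': True,
#                  'type_name': 'storage', 'unit': 'bytes',
#                  'available_capacity': 2 * 1024**4,
#                  'claimable_capacity': 1 * 1024**4,
#                  'total_capacity': 4 * 1024**4}])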
def printClaims(claims, scaled_units=True):
""" E.g.: claims = [{u'claim_size': 76441190400, u'endtime': datetime.datetime(2018, 6, 13, 17, 40),
u'id': 67420, u'resource_id': 122, u'resource_name': u'drg01_storage:/data1',
u'resource_type_id': 5, u'resource_type_name': u'storage',
u'starttime': datetime.datetime(2017, 6, 13, 17, 30),
u'status': u'claimed', u'status_id': 1, u'task_id': 75409, ...}, ...]
"""
header = {'id': 'ClId', 'resource_name': 'Resource Name', 'starttime': 'Start Time', 'endtime': 'End Time',
'claim_size': 'Claim Size', 'status': 'Status'}
print(('{id:7s} {resource_name:24s} {starttime:19s} {endtime:19s} {claim_size:16s} {status:8s}'.format(**header)))
print('===================================================================================================')
claims.sort(key=lambda c: c['id']) # secondary sorting key; SQL could have done this better
claims.sort(key=lambda c: c['starttime']) # primary sorting key (stable sort)
for claim in claims:
if scaled_units and (claim['resource_type_name'] == 'storage' or claim['resource_type_name'] == 'bandwidth'):
unit_base = 1024 if claim['resource_type_name'] == 'storage' else 1000 # no unit name here, so check type_name
claim['claim_size'] = humanreadablesize(claim['claim_size'], '', unit_base)
size_conv = '>16s'
else:
size_conv = '16d'
print((('{id:7d} {resource_name:24s} {starttime} {endtime} {claim_size:' + size_conv +
'} {status:8s}').format(**claim)))
if not claims:
print('<no claims on specified resources and time range>')
def updateStorageClaimsEndTime(radb, resources, storage_resource_type_id, lower_bound=None, upper_bound=None):
""" Update storage claims on resources in the RADB that currently apply, but the task
they belong to has ended (+ a short while). Set end time of these claims to task endtime.
This is intended for user clusters (e.g. DRAGNET) that do not auto-terminate storage claims on
cleanup. If users manage clean up autonomously, then they manage all storage accounting themselves.
"""
status = 0
resource_ids = [res['id'] for res in resources]
now = datetime.utcnow()
if lower_bound is None:
lower_bound = now
if upper_bound is None:
upper_bound = now
claims = radb.getResourceClaims(lower_bound=lower_bound, upper_bound=upper_bound,
resource_ids=resource_ids,
                                     resource_type=storage_resource_type_id)
# Get associated tasks for their end times. Update claims for tasks that ended.
    task_ids = list(set({claim['task_id'] for claim in claims}))
|
iaddict/mercurial.rb
|
vendor/mercurial/contrib/debugshell.py
|
Python
|
mit
| 533
| 0.003752
|
# debugshell extension
"""a python shell with repo, changelog & manifest objects"""
import mercurial
import code
def debugshell(ui, repo, **opts):
objects = {
        'mercurial': mercurial,
'repo': repo,
'cl': repo.changelog,
'mf': repo.manifest,
}
bannermsg = "loaded repo : %s\n" \
"using source: %s" % (repo.root,
|
mercurial.__path__[0])
code.interact(bannermsg, local=objects)
cmdtable = {
"debugshell|dbsh": (debugshell, [])
}
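# Assumed usage (not documented in the file itself): enable the extension in
# an hgrc, then start the shell from inside a repository checkout:
#
# [extensions]
# debugshell = /path/to/debugshell.py
#
# $ hg dbsh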
|
csmengwan/autorest
|
AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureParameterGrouping/setup.py
|
Python
|
mit
| 1,158
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestparametergroupingtestservice"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.3.0", "msrestazure>=0.3.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestParameterGroupingTestService",
author_email="",
url="",
keywords=["Swagger", "AutoRestParameterGroupingTestService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
|
andersk/zulip
|
zerver/migrations/0192_customprofilefieldvalue_rendered_value.py
|
Python
|
apache-2.0
| 418
| 0
|
# Generated by Django 1.11.16 on 2018-11-14 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
("zerver", "0191_realm_seat_limit"),
]
operations = [
migrations.AddField(
model_name="customprofilefiel
|
dvalue",
name="rendered_value",
field=models.TextField(default=None, null=True),
),
]
|
Valka7a/python-playground
|
sqlite3/tutorials/module-in-python.py
|
Python
|
mit
| 756
| 0.001323
|
"""
To use the SQLite3 module we need to add an import statement to our python script:
________________________________________________________________________________
>>> import sqlite3
________________________________________________________________________________
We can check sqlite version:
________________________________________________________________________________
>>> sqlite3.version
'2.6.0'
>>> sqlite3.sqlite_version
'3.7.17'
________________________________________________________________________________
The sqlite3.version is the version of the pysqlite (2.6.0), which is the binding
of the Python language to the SQLite database. The sqlite3.sqlite_version gives
us the version of the SQLite database library. In our case it is 3.7.17.
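As a small extra illustration (assumed; not part of the original tutorial),
the SQLite library version can also be read through SQL:
________________________________________________________________________________
>>> con = sqlite3.connect(':memory:')
>>> con.execute('SELECT SQLITE_VERSION()').fetchone()
(u'3.7.17',)
________________________________________________________________________________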
|
DonBeo/statsmodels
|
statsmodels/duration/tests/test_phreg.py
|
Python
|
bsd-3-clause
| 11,981
| 0.003506
|
import os
import numpy as np
from statsmodels.duration.hazard_regression import PHReg
from numpy.testing import (assert_allclose,
assert_equal)
import pandas as pd
# TODO: Include some corner cases: data sets with empty strata, strata
# with no events, entry times after censoring times, etc.
# All the R results
from . import survival_r_results
from . import survival_enet_r_results
"""
Tests of PHReg against R coxph.
Tests include entry times and stratification.
phreg_gentests.py generates the test data sets and puts them into the
results folder.
survival.R runs R on all the test data sets and constructs the
survival_r_results module.
"""
# Arguments passed to the PHReg fit method.
args = {"method": "bfgs", "disp": 0}
def get_results(n, p, ext, ties):
if ext is None:
coef_name = "coef_%d_%d_%s" % (n, p, ties)
se_name = "se_%d_%d_%s" % (n, p, ties)
time_name = "time_%d_%d_%s" % (n, p, ties)
hazard_name = "hazard_%d_%d_%s" % (n, p, ties)
else:
coef_name = "coef_%d_%d_%s_%s" % (n, p, ext, ties)
se_name = "se_%d_%d_%s_%s" % (n, p, ext, ties)
time_name = "time_%d_%d_%s_%s" % (n, p, ext, ties)
hazard_name = "hazard_%d_%d_%s_%s" % (n, p, ext, ties)
coef = getattr(survival_r_results, coef_name)
se = getattr(survival_r_results, se_name)
time = getattr(survival_r_results, time_name)
hazard = getattr(survival_r_results, hazard_name)
return coef, se, time, hazard
class TestPHReg(object):
# Load a data file from the results directory
def load_file(self, fname):
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=" ")
time = data[:,0]
status = data[:,1]
entry = data[:,2]
exog = data[:,3:]
return time, status, entry, exog
# Run a single test against R output
def do1(self, fname, ties, entry_f, strata_f):
# Read the test data.
time, status, entry, exog = self.load_file(fname)
n = len(time)
vs = fname.split("_")
n = int(vs[2])
p = int(vs[3].split(".")[0])
ties1 = ties[0:3]
# Needs to match the kronecker statement in survival.R
strata = np.kron(range(5), np.ones(n/5))
# No stratification or entry times
mod = PHReg(time, exog, status, ties=ties)
phrb = mod.fit(**args)
coef_r, se_r, time_r, hazard_r = get_results(n, p, None, ties1)
assert_allclose(phrb.params, coef_r, rtol=1e-3)
assert_allclose(phrb.bse, se_r, rtol=1e-4)
#time_h, cumhaz, surv = phrb.baseline_hazard[0]
# Entry times but no stratification
phrb = PHReg(time, exog, status, entry=entry,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-3)
# Stratification but no entry times
phrb = PHReg(time, exog, status, strata=strata,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-4)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Stratification and entry times
phrb = PHReg(time, exog, status, entry=entry,
strata=strata, ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et_st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Run all the tests
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("survival")
and x.endswith(".csv")]
for fname in fnames:
for ties in "breslow","efron":
for entry_f in False,True:
for strata_f in False,True:
yield (self.do1, fname, ties, entry_f,
strata_f)
def test_missing(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
time[0:5] = np.nan
status[5:10] = np.nan
exog[10:15,:] = np.nan
md = PHReg(time, exog, status, missing='drop')
assert_allclose(len(md.endog), 185)
assert_allclose(len(md.status), 185)
assert_allclose(md.exog.shape, np.r_[185,4])
def test_formula(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
entry = np.zeros_like(time)
entry[0:10] = time[0:10] / 2
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1],
"exog3": exog[:, 2], "exog4": exog[:, 3],
"entry": entry})
mod1 = PHReg(time, exog, status, entry=entry)
rslt1 = mod1.fit()
fml = "time ~ 0 + exog1 + exog2 + exog3 + exog4"
mod2 = PHReg.from_formula(fml, df, status=status,
entry=entry)
rslt2 = mod2.fit()
mod3 = PHReg.from_formula(fml, df, status="status",
entry="entry")
rslt3 = mod3.fit()
assert_allclose(rslt1.params, rslt2.params)
assert_allclose(rslt1.params, rslt3.params)
assert_allclose(rslt1.bse, rslt2.bse)
assert_allclose(rslt1.bse, rslt3.bse)
def test_predict_formula(self):
n = 100
np.random.seed(34234)
time = 50 * np.random.uniform(size=n)
status = np.random.randint(0, 2, n).astype(np.float64)
exog = np.random.uniform(1, 2, size=(n, 2))
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1]})
fml = "time ~ 0 + exog1 + np.log(exog2) + exog1*exog2"
model1 = PHReg.from_formula(fml, df, status=status)
result1 = model1.fit()
from patsy import dmatrix
dfp = dmatrix(model1.data.design_info.builder, df)
pr1 = result1.predict()
pr2 = result1.predict(exog=df)
pr3 = model1.predict(result1.params, exog=dfp) # No standard errors
pr4 = model1.predict(result1.params, cov_params=result1.cov_params(), exog=dfp)
prl = (pr1, pr2, pr3, pr4)
for i in range(4):
for j in range(i):
assert_allclose(prl[i].predicted_values, prl[j].predicted_values)
prl = (pr1, pr2, pr4)
for i in range(3):
for j in range(i):
assert_allclose(prl[i].standard_errors, prl[j].standard_errors)
def test_offset(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod1 = PHReg(time, exog, status)
rslt1 = mod1.fit()
offset = exog[:,0] * rslt1.params[0]
        exog = exog[:, 1:]
mod2 = PHReg(time, exog, status, offset=offset)
rslt2 = mod2.fit()
        assert_allclose(rslt2.params, rslt1.params[1:])
def test_post_estimation(self):
# All regression tests
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
mart_resid = rslt.martingale_residuals
assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)
w_avg = rslt.weighted_covariate_averages
assert_allclose(np.abs(w_avg[0]).sum(0),
np.r_[7.31008415,
|
waynegm/OpendTect-Plugins
|
bin/python/wmpy/Skeletons/ex_multi_trace_single_attribute_input_single_output.py
|
Python
|
gpl-3.0
| 2,619
| 0.030546
|
# External Attribute Skeleton
#
# Input: Multi-trace, single attribute
# Output: Single attribute
#
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# The attribute parameters - keep what you need
#
xa.params = {
'Inputs': ['Input'],
'ZSampMargin' : {'Value': [-30,30], 'Minimum': [-1,1], 'Symmetric': True, 'Hidden': False},
'StepOut' : {'Value': [1,1], 'Minimum': [1,1], 'Hidden': False},
'Par_0' : {'Name': 'Parameter 0', 'Value': 0},
'Par_1' : {'Name': 'Parameter 1', 'Value': 1},
'Par_2' : {'Name': 'Parameter 2', 'Value': 2},
'Par_3' : {'Name': 'Parameter 3', 'Value': 3},
'Par_4' : {'Name': 'Parameter 4', 'Value': 4},
'Par_5' : {'Name': 'Parameter 5', 'Value': 5},
'Select' : {'Name': 'Option', 'Values': ['First', 'Second', 'Third'], 'Selection': 0},
'Parallel' : False,
'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
#
# Define the compute function
#
def doCompute():
#
# Initialise some constants from the attribute parameters or the SeismicInfo, xa.SI, array for use in the calculations
# These are just some examples - keep/add what you need
#
number_inline = xa.SI['nrinl']
	number_xline = xa.SI['nrcrl']
centre_trace_x = xa.SI['nrinl']//2
centre_trace_y = xa.SI['nrcrl']//2
nyquist = 1.0/(2.0*xa.SI['zstep'])
par0 = xa.params['Par_0']['Value']
	zw = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
select = xa.params['Select']['Selection']
#
# This is the trace processing loop
#
while True:
xa.doInput()
#
# After doInput the TraceInfo, xa.TI, array contains information specific to this trace segment - keep what you need
#
number_of_samples = xa.TI['nrsamp']
start_time = xa.TI['z0']
current_inline = xa.TI['inl']
current_crossline = xa.TI['crl']
#
# Get the input
#
indata = xa.Input['Input']
#
# Your attribute calculation goes here
#
# Warning Python loops can be slow - this is contrived just to show how to access traces in the analysis data cube
#
outdata = np.zeros(number_of_samples)
for inline in range(number_inline):
for xline in range(number_xline):
if (inline != centre_trace_x and xline != centre_trace_y):
outdata += indata[inline,xline,:]
outdata /= (number_inline * number_xline - 1)
#------------------------------------------------------------------------------------
#
xa.Output = outdata
xa.doOutput()
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
|
Universal-Model-Converter/UMC3.0a
|
scripts/SSBM.py
|
Python
|
mit
| 25,037
| 0.037385
|
from data.COMMON import * #essentials
Header( 0.001, #Script Version (for updates)
('Melee',['dat']), #model activation
('Melee',['dat']), #anim activation
['RVL_IMG'])#revolution']) #included libs
#gist number: 2757147
#for the work I've done to get this far, this should really be v3.6 heh...
#but because this is a first release for a new program, why waste it. ^_^
from data.COMMON import * #essentials + Libs
#the functions from the included libs are imported directly into COMMON
#(you don't need the lib's name to use it's function)
#def ImportGUI(): #still in development
#this function is called before ImportModel
#-----------
def ImportModel(T,C):
#####
#1+None # <-- error for testing script reloading
#####
from math import cos,sin,pi #please refrain from using imports (these will be supported by dev5)
global degrad; degrad = pi/180
#used by _Bone and _Object for storing and reading the transformed matrices
global bones; bones=[] #bone offsets
global matrices; matrices=[] #matrices ( bind,invbind = matrices[ bones.index(bone_offset) ] )
def MTX44(): return [[1.0,0.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,0.0,1.0,0.0],[0.0,0.0,0.0,1.0]]
def TransformMatrix( translate, rotate, scale ):
global degrad
cosx = cos(rotate[0] * degrad)
sinx = sin(rotate[0] * degrad)
cosy = cos(rotate[1] * degrad)
siny = sin(rotate[1] * degrad)
cosz = cos(rotate[2] * degrad)
sinz = sin(rotate[2] * degrad)
return [
[ scale[0] * cosy * cosz,
scale[1] * (sinx * cosz * siny - cosx * sinz),
scale[2] * (sinx * sinz + cosx * cosz * siny),
translate[0]],
[ scale[0] * sinz * cosy,
scale[1] * (sinx * sinz * siny + cosz * cosx),
scale[2] * (cosx * sinz * siny - sinx * cosz),
translate[1]],
[ -scale[0] * siny,
scale[1] * sinx * cosy,
scale[2] * cosx * cosy,
translate[2]],
[0.0,0.0,0.0,1.0]]
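    # Sanity check (illustrative, not from the original script): with zero
    # rotation and unit scale the matrix reduces to a pure translation, e.g.
    # TransformMatrix([1.0,2.0,3.0], [0,0,0], [1,1,1]) ->
    #   [[1,0,0,1],[0,1,0,2],[0,0,1,3],[0,0,0,1]] (as floats)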
'''this block will change'''
def str_off(offset): #returns a hex-string of a given int offset '0x00000000'
STRING=(hex(offset).replace('0x','')).upper()
return '0'*(8-len(STRING))+STRING
################################################################################################
'''object-block functions'''
def vector(D_Type, exponent, offset=None, IS3D=1): #returns an [X,Y(,Z)] vector
#TODO: direct data values (if ever found)
def DataType(DT):
if DT==0: return bu8()/pow(2.0,exponent) #8bit unsigned pseudo-float
if DT==1: return bs8()/pow(2.0,exponent) #8bit signed pseudo-float
if DT==2: return bu16()/pow(2.0,exponent) #16bit unsigned pseudo-float
if DT==3: return bs16()/pow(2.0,exponent) #16bit signed pseudo-float
if DT==4: return bf32() #32bit float
if offset==None: #Direct format
return '' #yet to be seen (return blank vector)
else: #indexed format
j=Jump(offset, label=' -- Vector Data:')
vec=[DataType(D_Type),DataType(D_Type)]+([DataType(D_Type)] if IS3D else [])
Jump(j); return vec
def transform(V,M): #transform the vector via the matrix
return [((M[0][0]*V[0]) + (M[0][1]*V[1]) + (M[0][2]*V[2]) + M[0][3]),
((M[1][0]*V[0]) + (M[1][1]*V[1]) + (M[1][2]*V[2]) + M[1][3]),
((M[2][0]*V[0]) + (M[2][1]*V[1]) + (M[2][2]*V[2]) + M[2][3])]
def Ntransform(N,M): #transform the normal via the matrix
return [(M[0][0]*N[0]) + (M[0][1]*N[1]) + (M[0][2]*N[2]),
(M[1][0]*N[0]) + (M[1][1]*N[1]) + (M[1][2]*N[2]),
(M[2][0]*N[0]) + (M[2][1]*N[1]) + (M[2][2]*N[2])]
def getWeights(WOL):
Jump(WOL, label=' -- [ Weight_Offset ]:' )
ML=[]
for WO in StructArr(['bu32']): #Matrix/Weight Offset
Jump(WO[0]+32, label=' -- [ Bone_Offset , Weight ]:')
inflmtx = [[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0]]
#---restructured using the src for BrawlBox:
_weights = StructArr(['bu32','bf32'])
if len(_weights)>1:
for MO,W in _weights:
bind,invbind = matrices[bones.index(MO+32)]
'''
invbind = MtxTranspose(invbind)
invbind[0][3],invbind[1][3],invbind[2][3] = invbind[3][:3]
invbind[3][0],invbind[3][1],invbind[3][2] = [0.0,0.0,0.0]
'''
#'''
tempmtx = MtxMultiply(bind,invbind)
'''^ that's the world-transform matrix'''
for r in range(4):
for c in range(4):
inflmtx[r][c]+=tempmtx[r][c]*W
elif len(_weights)==1:
MO,W = _weights[0]
bind,invbind = matrices[bones.index(MO+32)]
for r in range(4):
for c in range(4):
inflmtx[r][c]=bind[r][c]*W
#---
'''
inflmtx = MtxTranspose(invbind)
inflmtx[0][3],inflmtx[1][3],inflmtx[2][3] = inflmtx[3][:3]
inflmtx[3][0],inflmtx[3][1],inflmtx[3][2] = [0.0,0.0,0.0]
'''
#'''
ML+=[ inflmtx ]
return ML
def getAttrs(AO): Jump(AO, label=' -- Attributes [CP_index,CP_Type,IsXYZ/NBT/A/ST,Data_Type,Exponent,unk,Stride/Format,Offset]');\
return StructArr(['bu32','bu32','bu32','bu32','bu8','bu8','bu16','bu32'],[255,'*','*','*','*','*','*','*'])
def CPT(T): #face-point index/value formats
if T == 0: return '',0 #null
if T == 1: return None,0 #direct data (handled by code)
if T == 2: return bu8(),1 #8bit index
if T == 3: return bu16(),2 #16bit index
def geometry(Attributes,weights_list):
global length;length=0
def color(fmt,offset=None,alpha=1): #currently returns [R,G,B,A] int() colors only
global length #TODO: remove to increase speed (faster if we don't redefine this function)
if offset==None: #Direct format
if fmt==0:length+=2;D=bu16(); return [
int(((D>>11)&31)*(255/31)),int(((D>>5)&63)*(255/63)),int((D&31)*(255/31)),255] #RGB565
if fmt==1:length+=3; return [bu8(),bu8(),bu8(),255] #return [bu8(),bu8(),bu8()] #RGB8
if fmt==2:length+=4; return [bu8(),bu8(),bu8(),bu8()] #RGBX8
if fmt==3:length+=2; RG,BA=bu8(),bu8(); R,G,B,A=RG>>4,RG&15,BA>>4,BA&15; return [
(R*16)+R,(G*16)+G,(B*16)+B,(A*16)+A]#RGBA4
if fmt==4:length+=3; D=bu24(); return [
(D>>18)*(255/63),((D>>12)&63)*(255/63),((D>>6)&63)*(255/63),(D&63)*(255/63)] #RGBA6
if fmt==5:length+=4; return [bu8(),bu8(),bu8(),bu8()] #RGBA8
else: #indexed format
return [255,255,255,255]
#yet to be seen (returning white)
count=bu16(label=' -- Facepoint Count')
while count>0:
tmtx = MTX44() #transformations
V,N,C,U='','',['',''],['','','','','','','','']
for attr in Attributes:
I,L=CPT(attr[1]); length += L
            def Get(IS3D): return vector(attr[3], attr[4], (attr[7]+(I*attr[6]))+32, IS3D)
switch(attr[0])
if case( 0): tmtx = weights_list[bu8()/3];length+=1; LABEL(' -- Weight Index/value') #vert/nor_mtx
elif case( 1): bu8()/3; length+=1 #uv[0]_mtx (value not used yet)
elif case( 2): bu8()/3; length+=1 #uv[1]_mtx
elif case( 3): bu8()/3; length+=1 #uv[2]_mtx
elif case( 4): bu8()/3; length+=1 #uv[3]_mtx
elif case( 5): bu8()/3; length+=1 #uv[4]_mtx
            elif case( 6): bu8()/3; length+=1 #uv[5]_mtx
|
tidepool-org/dfaker
|
dfaker/cbg.py
|
Python
|
bsd-2-clause
| 2,185
| 0.008696
|
import statsmodels.api as sm
from . import common_fields
from . import make_gaps
from . import tools
from .device_event import make_alarm_event
def apply_loess(solution, num_days, gaps):
"""Solves the blood glucose equation over specified period of days
and applies a loess smoothing regression to the data
Returns numpy arrays for glucose and time values
"""
    #solving for smbg values
smbg_gluc = solution[:, 1]
smbg_time = solution[:, 2]
#make gaps in cbg data, if needed
solution = make_gaps.gaps(solution, num_days=num_days, gaps=gaps)
#solving for cbg values
cbg_gluc = solution[:, 1]
cbg_time = solution[:, 2]
#smoothing blood glucose eqn
lowess = sm.nonparametric.lowess
smoothing_distance = 1.5 #1.5 minutes
fraction = (smoothing_distance / (num_days * 60 * 24)) * 100
result = lowess(cbg_gluc, cbg_time, frac=fraction, is_sorted=True)
smoothed_cbg_time = result[:, 0]
smoothed_cbg_gluc = result[:, 1]
return smoothed_cbg_gluc, smoothed_cbg_time, smbg_gluc, smbg_time
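# Minimal standalone illustration of the lowess call used above (synthetic
# data invented for the example):
#
# import numpy as np
# import statsmodels.api as sm
# t = np.linspace(0, 10, 200)
# noisy = np.sin(t) + np.random.normal(0, 0.2, t.size)
# smoothed = sm.nonparametric.lowess(noisy, t, frac=0.1, is_sorted=True)
# # smoothed[:, 0] is time, smoothed[:, 1] is the smoothed signal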
def cbg(gluc, timesteps, zonename):
""" construct cbg events
gluc -- a list of glucose values at each timestep
timesteps -- a list of epoch times
zonename -- name of timezone in effect
"""
cbg_data = []
for value, timestamp in zip(gluc, timesteps):
cbg_reading = {}
cbg_reading = common_fields.add_common_fields('cbg', cbg_reading, timestamp, zonename)
cbg_reading["value"] = tools.convert_to_mmol(value)
cbg_reading["units"] = "mmol/L"
if value > 400:
            cbg_reading["annotation"] = [{"code": "bg/out-of-range", "threshold": 400, "value": "high"}]
cbg_reading["value"] = tools.convert_to_mmol(401)
elif value < 40:
cbg_reading["annotation"] = [{"code": "bg/out-of-range", "threshold": 40, "value": "low"}]
cbg_reading["value"] = tools.convert_to_mmol(39)
#add a device meta alarm for low insulin reading
meta_alarm = make_alarm_event(timestamp, zonename)
cbg_data.append(meta_alarm)
cbg_data.append(cbg_reading)
return cbg_data
|
spatialdev/onadata
|
onadata/apps/logger/migrations/0048_auto__add_project__add_unique_project_name_organization__add_projectxf.py
|
Python
|
bsd-2-clause
| 16,039
| 0.007419
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'logger_project', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('metadata', self.gf('jsonfield.fields.JSONField')(blank=True)),
('organization', self.gf('django.db.models.fields.related.ForeignKey')(related_name='project_org', to=orm['auth.User'])),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='project_owner', to=orm['auth.User'])),
('shared', self.gf('django.db.models.fields.BooleanField')(default=False)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('logger', ['Project'])
# Adding M2M table for field user_stars on 'Project'
m2m_table_name = db.shorten_name(u'logger_project_user_stars')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['logger.project'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['project_id', 'user_id'])
# Adding unique constraint on 'Project', fields ['name', 'organization']
db.create_unique(u'logger_project', ['name', 'organization_id'])
# Adding model 'ProjectXForm'
db.create_table(u'logger_projectxform', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('xform', self.gf('django.db.models.fields.related.ForeignKey')(related_name='px_xforms', to=orm['logger.XForm'])),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='px_projects', to=orm['logger.Project'])),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='px_creator', to=orm['auth.User'])),
))
db.send_create_signal('logger', ['ProjectXForm'])
# Adding unique constraint on 'ProjectXForm', fields ['xform', 'project']
db.create_unique(u'logger_projectxform', ['xform_id', 'project_id'])
def backwards(self, orm):
# Removing unique constraint on 'ProjectXForm', fields ['xform', 'project']
db.delete_unique(u'logger_projectxform', ['xform_id', 'project_id'])
# Removing unique constraint on 'Project', fields ['name', 'organization']
db.delete_unique(u'logger_project', ['name', 'organization_id'])
# Deleting model 'Project'
db.delete_table(u'logger_project')
# Removing M2M table for field user_stars on 'Project'
db.delete_table(db.shorten_name(u'logger_project_user_stars'))
# Deleting model 'ProjectXForm'
db.delete_table(u'logger_projectxform')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'logger.attachment': {
'Meta': {'object_name': 'Attachment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['logger.Instance']"}),
            'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
},
'logger.instance': {
'Meta': {'object_name': 'Instance'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryCollectionField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''",
|
deepchem/deepchem-gui
|
gui/app.py
|
Python
|
gpl-3.0
| 8,020
| 0.001995
|
import os
from flask import Flask, url_for, request, render_template, jsonify, send_file
from werkzeug.utils import secure_filename
import deepchem as dc
import subprocess
from shutil import copyfile
import csv
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'static/')
DEEPCHEM_GUI = Flask('deepchem-gui', static_folder=STATIC_DIR,
static_url_path='/static',
template_folder=os.path.join(STATIC_DIR, 'deepchem-gui',
'templates')
)
UPLOAD_DIR = os.path.join(STATIC_DIR, "data/")
if not os.path.isdir(UPLOAD_DIR):
os.mkdir(UPLOAD_DIR)
print("Created data directory")
# serve ngl webapp clone
@DEEPCHEM_GUI.route('/')
def webapp():
return render_template('webapp.html')
# download protein and ligand files
@DEEPCHEM_GUI.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST':
proteins = request.files.getlist('proteins')
ligands = request.files.getlist('ligands')
smiles = request.files.getlist('smiles')
smarts = request.files.getlist('smarts')
if proteins and ligands:
protein_fns = []
ligand_fns = []
for protein in proteins:
protein_fn = os.path.join(
UPLOAD_DIR,
secure_filename(protein.filename)
)
protein.save(protein_fn)
protein_fns.append(protein_fn)
for ligand in ligands:
ligand_fn = os.path.join(
UPLOAD_DIR,
secure_filename(ligand.filename)
)
ligand.save(ligand_fn)
ligand_fns.append(ligand_fn)
docking_result = dock(protein_fns, ligand_fns)
print(docking_result)
for i in range(len(protein_fns)):
for j in range(len(ligand_fns)):
protein_fn = docking_result[i][j]["protein"]
new_protein_fn = protein_fn.split("/")[-1]
copyfile(protein_fn, os.path.join(
UPLOAD_DIR, new_protein_fn))
docking_result[i][j]["protein"] = url_for(
'static', filename="data/" + new_protein_fn)
ligand_fn = docking_result[i][j]["ligand"]
new_ligand_fn = ligand_fn.split("/")[-1]
copyfile(ligand_fn,
os.path.join(UPLOAD_DIR, new_ligand_fn))
docking_result[i][j]["ligand"] = url_for(
'static', filename="data/" + new_ligand_fn)
return jsonify(docking_result)
elif smiles:
smiles = smiles[0]
smiles_fn = os.path.join(
UPLOAD_DIR,
secure_filename(smiles.filename)
)
smiles.save(smiles_fn)
csvfile = open(smiles_fn, 'r')
csvreader = csv.reader(csvfile, delimiter=',')
data = []
for row in csvreader:
data.append(row)
data = render_smiles(data)
return jsonify(data)
elif smarts:
smarts = smarts[0]
smarts_fn = os.path.join(
UPLOAD_DIR,
secure_filename(smarts.filename)
)
smarts.save(smarts_fn)
csvfile = open(smarts_fn, 'r')
csvreader = csv.reader(csvfile, delimiter=',')
data = []
for row in csvreader:
data.append(row)
data = render_smarts(data)
return jsonify(data)
else:
return jsonify(error_msg="Invalid file transfer.")
else:
raise NotImplementedError
def render_smiles(data):
smiles_col_idx = [j for j in range(len(data[0])) if data[0][j]=="SMILES"][0]
for i, row in enumerate(data):
if i==0:
data[i].append("SMILES IMG")
continue
try:
smiles_str = data[i][smiles_col_idx]
smiles = Chem.MolFromSmiles(smiles_str)
AllChem.Compute2DCoords(smiles)
smiles_fn = 'smiles_%d.png' % i
smiles_img = os.path.join(UPLOAD_DIR, smiles_fn)
Draw.MolToFile(smiles, smiles_img)
data[i].append(url_for('static', filename='data/' + smiles_fn))
except Exception as e:
print(e)
data[i].append("Invalid")
pass
return data
def render_smarts(data):
smarts_col_idx = [j for j in range(len(data[0])) if data[0][j]=="SMARTS"][0]
smiles_col_idx_1 = [j for j in range(len(data[0])) if data[0][j]=="SMILES_1"][0]
smiles_col_idx_2 = [j for j in range(len(data[0])) if data[0][j]=="SMILES_2"][0]
for i, row in enumerate(data):
if i==0:
data[i].append("PRODUCT")
data[i].append("SMILES_1 IMG")
data[i].append("SMILES_2 IMG")
data[i].append("PRODUCT IMG")
continue
try:
smarts_str = data[i][smarts_col_idx]
smiles_str_1 = data[i][smiles_col_idx_1]
smiles_str_2 = data[i][smiles_col_idx_2]
rxn = AllChem.ReactionFromSmarts(smarts_str)
ps = rxn.RunReactants((Chem.MolFromSmiles(smiles_str_1), Chem.MolFromSmiles(smiles_str_2)))
product = ps[0][0]
product_str = Chem.MolToSmiles(product)
data[i].append(product_str)
AllChem.Compute2DCoords(product)
product_fn = 'product_%d.png' % i
product_img = os.path.join(UPLOAD_DIR, product_fn)
Draw.MolToFile(product, product_img)
smiles_1 = Chem.MolFromSmiles(smiles_str_1)
AllChem.Compute2DCoords(smiles_1)
smiles_1_fn = 'smiles_1_%d.png' % i
smiles_1_img = os.path.join(UPLOAD_DIR, smiles_1_fn)
Draw.MolToFile(smiles_1, smiles_1_img)
smiles_2 = Chem.MolFromSmiles(smiles_str_2)
AllChem.Compute2DCoords(smiles_2)
smiles_2_fn = 'smiles_2_%d.png' % i
smiles_2_img = os.path.join(UPLOAD_DIR, smiles_2_fn)
Draw.MolToFile(smiles_2, smiles_2_img)
            # Append image URLs in the same order as the header row above
            # (SMILES_1 IMG, SMILES_2 IMG, PRODUCT IMG).
            data[i].append(url_for('static', filename='data/' + smiles_1_fn))
            data[i].append(url_for('static', filename='data/' + smiles_2_fn))
            data[i].append(url_for('static', filename='data/' + product_fn))
except Exception as e:
print(e)
data[i].append("Invalid")
data[i].append("Invalid")
data[i].append("Invalid")
pass
return data
def dock(protein_fns, ligand_fns):
    docking_result = [[{} for j in range(len(ligand_fns))]
                      for i in range(len(protein_fns))]
for i in range(len(protein_fns)):
for j in range(len(ligand_fns)):
protein_fn = protein_fns[i]
ligand_fn = ligand_fns[j]
print("Docking: %s to %s" % (ligand_fn, protein_fn))
docker = dc.dock.VinaGridDNNDocker(
exhaustiveness=1, detect_pockets=False)
(score, (protein_docked, ligand_docked)
) = docker.dock(protein_fn, ligand_fn)
print("Scores: %f" % (score))
print("Docked protein: %s" % (protein_docked))
print("Docked ligand: %s" % (ligand_docked))
ligand_docked_fn = ligand_docked.replace(".pdbqt", "")
subprocess.call("csh %s %s" % (os.path.join(STATIC_DIR, 'deepchem-gui', 'scripts', 'stripqt.sh'),
ligand_docked_fn), shell=True)
ligand_docked_pdb = ligand_docked_fn + ".pdb"
            docking_result[i][j] = {'score': score[0],
                                    'protein': protein_docked,
                                    'ligand': ligand_docked_pdb}
return docking_result
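# A minimal sketch of the returned structure (field names taken from the
# assignments above; the paths shown are hypothetical):
#   docking_result[i][j] is the dict for protein i vs. ligand j, e.g.
#   {'score': 1.23,
#    'protein': '/path/to/protein_docked.pdb',
#    'ligand': '/path/to/ligand_docked.pdb'}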
|
rhyolight/nupic.son
|
app/soc/logic/program.py
|
Python
|
apache-2.0
| 944
| 0.002119
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for programs."""
from soc.models import program as program_model
def getSponsorKey(program):
"""Returns key which represents Sponsor of the specified program.
Args:
    program: program entity
Returns:
db.Key instance of the sponsor for the specified program
"""
return program_model.Program.sponsor.get_value_for_datastore(program)
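# A minimal usage sketch (hypothetical; 'program' is assumed to be a fetched
# Program entity and 'db' the App Engine datastore module). Reading the key
# via get_value_for_datastore avoids the implicit datastore fetch that
# accessing program.sponsor directly would trigger:
#
#   sponsor_key = getSponsorKey(program)
#   sponsor = db.get(sponsor_key)  # dereference only when actually needed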
|
Kegbot/kegbot-server
|
pykeg/web/setup_wizard/views.py
|
Python
|
gpl-2.0
| 6,069
| 0.00033
|
from functools import wraps
import logging
import traceback
from django.conf import settings
from django.core import management
from django.contrib import messages
from django.contrib.auth import authenticate
from django.http import Http404
from django.shortcuts import redirect
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from pykeg.core import defaults
from pykeg.core import models
from pykeg.util import dbstatus
from pykeg.core.util import get_version_object
from .forms import AdminUserForm
from .forms import MiniSiteSettingsForm
logger = logging.getLogger(__name__)
def setup_view(f):
"""Decorator for setup views."""
def new_function(*args, **kwargs):
request = args[0]
if not settings.DEBUG:
raise Http404("Site is not in DEBUG mode.")
if request.kbsite and request.kbsite.is_setup:
raise Http404("Site is already setup, wizard disabled.")
return f(*args, **kwargs)
return wraps(f)(new_function)
@setup_view
@never_cache
def start(request):
"""Shows database setup button"""
context = {}
if request.method == "POST":
try:
management.call_command("migrate", no_input=True)
return redirect("setup_mode")
except Exception as e:
logger.exception("Error installing database")
context["error_message"] = str(e)
context["error_stack"] = traceback.format_exc()
else:
try:
logger.info("Checking database status ...")
dbstatus.check_db_status()
logger.info("Database status OK.")
except dbstatus.DatabaseNotInitialized:
context["need_install"] = True
except dbstatus.NeedMigration:
context["need_upgrade"] = True
return render(request, "setup_wizard/start.html", context=context)
@setup_view
@never_cache
def mode(request):
"""Shows the enable/disable hardware toggle."""
context = {}
if request.method == "POST":
if "enable_sensing" in request.POST:
response = redirect("setup_accounts")
response.set_cookie("kb_setup_enable_sensing", "True")
return response
elif "disable_sensing" in request.POST:
response = redirect("setup_site_settings")
response.set_cookie("kb_setup_enable_sensing", "False")
response.set_cookie("kb_setup_enable_users", "False")
return response
else:
messages.error(request, "Unknown response.")
return render(request, "setup_wizard/mode.html", context=context)
@setup_view
@never_cache
def upgrade(request):
context = {}
if request.method == "POST":
try:
management.call_command("migrate", no_input=True)
site = models.KegbotSite.get()
app_version = get_version_object()
site.server_version = str(app_version)
site.save()
return redirect("kb-home")
except Exception as e:
logger.exception("Error installing database")
context["error_message"] = str(e)
context["error_stack"] = traceback.format_exc()
try:
logger.info("Checking database status ...")
dbstatus.check_db_status()
logger.info("Database status OK.")
except dbstatus.DatabaseNotInitialized:
context["message"] = "Database not initialized"
except dbstatus.NeedMigration:
context["message"] = "Database upgrade needed"
return render(request, "setup_wizard/upgrade.html", context=context)
@setup_view
@never_cache
def setup_accounts(request):
""" Shows the enable/disable accounts toggle. """
context = {}
if request.method == "POST":
if "enable_users" in request.POST:
response = redirect("setup_site_settings")
response.set_cookie("kb_setup_enable_users", "True")
            return response
elif "disable_users" in request.POST:
response = redirect("setup_site_settings")
response.set_cookie("kb_setup_enable_users", "False")
return response
else:
messages.error(request, "Unknown response.")
return render(request, "setup_wizard/accounts.html", context=context)
@setup_view
@never_cache
def site_settings(request):
context = {}
if request.method == "POST":
site = models.KegbotSite.get()
form = MiniSiteSettingsForm(request.POST, instance=site)
if form.is_valid():
form.save()
messages.success(request, "Settings saved!")
return redirect("setup_admin")
else:
try:
defaults.set_defaults()
except defaults.AlreadyInstalledError:
pass
site = models.KegbotSite.get()
site.enable_sensing = request.COOKIES.get("kb_setup_enable_sensing") == "True"
site.enable_users = request.COOKIES.get("kb_setup_enable_users") == "True"
site.save()
form = MiniSiteSettingsForm(instance=site)
context["form"] = form
return render(request, "setup_wizard/site_settings.html", context=context)
@setup_view
@never_cache
def admin(request):
context = {}
form = AdminUserForm()
if request.method == "POST":
form = AdminUserForm(request.POST)
if form.is_valid():
form.save()
user = authenticate(
username=form.cleaned_data.get("username"),
password=form.cleaned_data.get("password"),
)
return redirect("setup_finish")
context["form"] = form
return render(request, "setup_wizard/admin.html", context=context)
@setup_view
@never_cache
def finish(request):
context = {}
if request.method == "POST":
site = models.KegbotSite.get()
site.is_setup = True
site.save()
messages.success(request, "Tip: Install a new Keg in Admin: Taps")
return redirect("kb-home")
return render(request, "setup_wizard/finish.html", context=context)
|
TMiguelT/csvschema
|
csv_schema/__init__.py
|
Python
|
mit
| 103
| 0
|
# -*- coding: utf-8 -*-
"""Module that he
|
lps in checking the correctnes
|
s of CSV file structure."""
|
CalthorpeAnalytics/urbanfootprint
|
footprint/client/configuration/default/layer_style/default_layer_style.py
|
Python
|
gpl-3.0
| 793
| 0.001261
|
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from footprint.main.models.presentation.layer_style import LayerStyle
__author__ = 'calthorpe_analytics'
class DefaultLayerStyle(LayerStyle):
"""
The default LayerStyle for newly created Layers that don't match
any more specific LayerStyle subclasses
"""
model_class = object
|
GoogleCloudPlatform/repo-automation-playground
|
xunit-autolabeler-v2/ast_parser/core/__init__.py
|
Python
|
apache-2.0
| 783
| 0
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Add the parent 'ast_parser' module to PYTHONPATH
# for consistency between Python and pytest
# Pytest root dir: '$REPO_ROOT/'
# Python root dir: '$REPO_ROOT/ast_parser'
import sys
sys.path.append('..')
|
guanxin0206/dice_crawler
|
dice_spider_2/spider/html_outputer.py
|
Python
|
bsd-2-clause
| 1,497
| 0.008059
|
#!/usr/bin/env python -t
# -*- coding: UTF-8 -*-
import codecs
import urllib
class HtmlOutputer(object):
def __init__(self):
self.datas = []
def collect_data(self,data):
if data is None:
return
self.datas.append(data)
    def output_html(self):
fout = open('output.html','w')
"""
for data in self.datas:
print data['url'].encode("utf-8"), type(data['url'].encode("utf-8"))
print urllib.unquote(data['url'].encode("utf-8")) ,type(urllib.unquote(data['url'].encode("utf-8")))
#print urllib.unquote(data['url']).encode("utf-8") , type(urllib.unquote(data['url']).encode("utf-8"))
#print data['title'],type(data['title'])
            #print data['summary'],type(data['summary'])
"""
fout.write("<html>")
fout.write("<head>")
fout.write("<meta charset='UTF-8'>")
fout.write("</head>")
fout.write("<body>")
fout.write("<table>")
        # default encoding is ascii
for data in self.datas:
fout.write("<tr>")
fout.write("<td>%s</td>" % urllib.unquote(data['url'].encode('utf-8')))
fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
fout.write("</tr>")
fout.write("</table>")
fout.write("</body>")
fout.write("</html>")
fout.close()
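# A minimal usage sketch (hypothetical data shape, inferred from the fields
# accessed above; Python 2 assumed, matching the print statements in the
# commented-out block):
#
#   outputer = HtmlOutputer()
#   outputer.collect_data({'url': u'http%3A//example.com',
#                          'title': u'A title', 'summary': u'A summary'})
#   outputer.output_html()  # writes an HTML table to output.html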
|
jessamynsmith/twitterbot
|
tests/messages/test_base.py
|
Python
|
mit
| 989
| 0.003033
|
import unittest
from twitter_bot import messages
class TestBaseMessageProvider(unittest.TestCase):
def test_extract_hashtags_empty_mention(self):
provider = messages.BaseMessageProvider()
hashtags = provider._extract_hashtags({})
self.assertEqual([], hashtags)
def test_extract_hashtags_with_hashtags(self):
mention = {'entities': {'hashtags': [{'text': 'love'}, {'text': 'hate'}]}}
provider = messages.BaseMessageProvider()
hashtags = provider._extract_hashtags(mention)
self.assertEqual(['love', 'hate'], hashtags)
def test_create(self):
provider = messages.BaseMessageProvider()
try:
provider.create({}, 20)
self.fail("Should not be able to call create() on abstract parent class")
except NotImplementedError as e:
error = 'Child class must implement create(self, mention, max_message_length)'
self.assertEqual(error, '{0}'.format(e))
|
gngrwzrd/gity
|
python/add.py
|
Python
|
gpl-3.0
| 1,341
| 0.028337
|
# Copyright Aaron Smith 2009
#
# This file is part of Gity.
#
# Gity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gity. If not, see <http://www.gnu.org/licenses/>.
from _util import *
try:
import sys,re,os,subprocess
except Exception, e:
sys.stderr.write(str(e))
exit(84)
command=""
try:
from _argv import *
if not checkfiles(options): raise Exception("Gity Error: The add command requires files! They weren't set.")
gitcommand="add"
command="%s %s %s %s"%(options.git,gitcommand,"--ignore-errors",make_file_list_for_git(option
|
s.files))
rcode,stout,sterr=run_command(command)
rcode_for_git_exit(rcode,sterr)
exit(0)
except Exception, e:
sys.stderr.write("The add command threw this error: " + str(e))
sys.stderr.write("\ncommand: %s\n" % command)
log_gity_version(options.gityversion)
log_gitv(options.git)
exit(84)
|
dichen001/Go4Jobs
|
JackChen/Google/484. Find Permutation.py
|
Python
|
gpl-3.0
| 1,750
| 0.003429
|
"""
By now, you are given a secret signature consisting of character 'D' and 'I'. 'D' represents a decreasing relationship between two numbers, 'I' represents an increasing relationship between two numbers. And our secret signature was constructed by a special integer array, which contains uniquely all the different numbers from 1 to n (n is the length of the secret signature plus 1). For example, the secret signature "DI" can be constructed by array [2,1,3] or [3,1,2], but won't be constructed by array [3,2,4] or [2,1,3,4], which are both illegal since they can't represent the "DI" secret signature.
On the other hand, now your job is to find the lexicographically smallest permutation of [1, 2, ... n] could refer to the given secret signature in the input.
Example 1:
Input: "I"
Output: [1,2]
Explanation: [1,2] is the only legal initial special string that can construct secret signature "I", where the numbers 1 and 2 construct an increasing relationship.
Example 2:
Input: "DI"
Output: [2,1,3]
Explanation: Both [2,1,3] and [3,1,2] can construct the secret signature "DI",
but since we want to find the one with the smallest lexicographical permutation, you need to output [2,1,3]
Note:
The input string will only contain the character 'D' and 'I'.
The length of input string is a positive integer and will not exceed 10,000
"""
class Solution(object):
def findPermutation(self, s):
"""
:type s: str
:rtype: List[int]
"""
i, n = 0, len(s)
ans = range(1, n + 2)
while i < n:
j = i
while j < n and s[j] == "D":
j += 1
ans[i:j+1] = ans[i:j+1][::-1]
i = i + 1 if i == j else j
return ans
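# A quick sanity check of the approach above (a sketch, assuming Python 2,
# where range() returns a list so slice assignment on `ans` works): starting
# from the identity permutation and reversing each maximal run of 'D's yields
# the lexicographically smallest answer.
#
#   Solution().findPermutation("I")    # -> [1, 2]
#   Solution().findPermutation("DI")   # -> [2, 1, 3]
#   Solution().findPermutation("DDI")  # -> [3, 2, 1, 4]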
|
7digital/troposphere
|
examples/Lambda.py
|
Python
|
bsd-2-clause
| 5,154
| 0
|
from troposphere.constants import NUMBER
from troposphere import FindInMap, GetAtt, Join, Output
from troposphere import Parameter, Ref, Template
from troposphere.awslambda import Function, Code, MEMORY_VALUES
from troposphere.cloudformation import CustomResource
from troposphere.ec2 import Instance
from troposphere.ec2 import SecurityGroup
from troposphere.iam import Role, Policy
t = Template()
t.add_version("2010-09-09")
ExistingVPC = t.add_parameter(Parameter(
"ExistingVPC",
Type="AWS::EC2::VPC::Id",
Description=(
"The VPC ID that includes the security groups in the "
|
"ExistingSecurityGroups parameter."
),
))
InstanceType = t.add_parameter(Parameter(
"InstanceType",
Default="t2.micro",
Type="String",
AllowedValues=["t2.micro", "m1.small"],
))
ExistingSecurityGroups = t.add_parameter(Parameter(
"ExistingSecurityGroups",
Type="List<AWS::EC2::SecurityGroup::Id>",
))
MemorySize = t.add_parameter(Parameter(
'LambdaMemorySize',
Type=NUMBER,
Description='Amount of memory to allocate to the Lambda Function',
Default='128',
AllowedValues=MEMORY_VALUES
))
Timeout = t.add_parameter(Parameter(
'LambdaTimeout',
Type=NUMBER,
Description='Timeout in seconds for the Lambda function',
Default='60'
))
t.add_mapping("AWSInstanceType2Arch",
{u'm1.small': {u'Arch': u'PV64'},
u't2.micro': {u'Arch': u'HVM64'}}
)
t.add_mapping("AWSRegionArch2AMI",
{u'ap-northeast-1': {u'HVM64': u'ami-cbf90ecb',
u'PV64': u'ami-27f90e27'},
u'ap-southeast-1': {u'HVM64': u'ami-68d8e93a',
u'PV64': u'ami-acd9e8fe'},
u'ap-southeast-2': {u'HVM64': u'ami-fd9cecc7',
u'PV64': u'ami-ff9cecc5'},
u'cn-north-1': {u'HVM64': u'ami-f239abcb',
u'PV64': u'ami-fa39abc3'},
u'eu-central-1': {u'HVM64': u'ami-a8221fb5',
u'PV64': u'ami-ac221fb1'},
u'eu-west-1': {u'HVM64': u'ami-a10897d6',
u'PV64': u'ami-bf0897c8'},
u'sa-east-1': {u'HVM64': u'ami-b52890a8',
u'PV64': u'ami-bb2890a6'},
u'us-east-1': {u'HVM64': u'ami-1ecae776',
u'PV64': u'ami-1ccae774'},
u'us-west-1': {u'HVM64': u'ami-d114f295',
u'PV64': u'ami-d514f291'},
u'us-west-2': {u'HVM64': u'ami-e7527ed7',
u'PV64': u'ami-ff527ecf'}}
)
code = [
"var response = require('cfn-response');",
"exports.handler = function(event, context) {",
" var responseData = {Value: event.ResourceProperties.List};",
" responseData.Value.push(event.ResourceProperties.AppendedItem);",
" response.send(event, context, response.SUCCESS, responseData);",
"};",
]
AppendItemToListFunction = t.add_resource(Function(
"AppendItemToListFunction",
Code=Code(
ZipFile=Join("", code)
),
Handler="index.handler",
Role=GetAtt("LambdaExecutionRole", "Arn"),
Runtime="nodejs",
MemorySize=Ref(MemorySize),
Timeout=Ref(Timeout)
))
LambdaExecutionRole = t.add_resource(Role(
"LambdaExecutionRole",
Path="/",
Policies=[Policy(
PolicyName="root",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["logs:*"],
"Resource": "arn:aws:logs:*:*:*",
"Effect": "Allow"
}]
})],
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"Service": ["lambda.amazonaws.com"]
}
}]
},
))
MyEC2Instance = t.add_resource(Instance(
"MyEC2Instance",
SecurityGroupIds=GetAtt("AllSecurityGroups", "Value"),
InstanceType=Ref(InstanceType),
ImageId=FindInMap("AWSRegionArch2AMI", Ref("AWS::Region"),
FindInMap("AWSInstanceType2Arch", Ref(InstanceType),
"Arch")),
))
AllSecurityGroups = t.add_resource(CustomResource(
"AllSecurityGroups",
List=Ref(ExistingSecurityGroups),
AppendedItem=Ref("SecurityGroup"),
ServiceToken=GetAtt(AppendItemToListFunction, "Arn"),
))
SecurityGroup = t.add_resource(SecurityGroup(
"SecurityGroup",
SecurityGroupIngress=[
{"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
"FromPort": "80"}],
VpcId=Ref(ExistingVPC),
GroupDescription="Allow HTTP traffic to the host",
SecurityGroupEgress=[
{"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
"FromPort": "80"}],
))
AllSecurityGroups = t.add_output(Output(
"AllSecurityGroups",
Description="Security Groups that are associated with the EC2 instance",
Value=Join(", ", GetAtt(AllSecurityGroups, "Value")),
))
print(t.to_json())
|
neilhan/tensorflow
|
tensorflow/contrib/factorization/python/ops/gmm.py
|
Python
|
apache-2.0
| 7,521
| 0.002792
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
class GMM(estimator.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See TensorFlowEstimator
steps: See TensorFlowEstimator
continue_training: See TensorFlowEstimator
config: See TensorFlowEstimator
verbose: See TensorFlowEstimator
"""
super(GMM, self).__init__(
model_dir=model_dir,
config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See TensorFlowEstimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(
x, None, self._num_clusters, self.batch_size)
self._train_model(input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
      x: 2-D matrix or iterator.
      batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES] for prediction in
super(GMM, self).predict(x=x, batch_size=batch_size, as_iterable=True)])
def clusters(self):
"""Returns cluster centers."""
clusters = tf.contrib.framework.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return tf.contrib.framework.load_variable(
self.model_dir,
gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
return features
def _get_train_ops(self, features, _):
(_,
_,
losses,
training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
incr_step = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
loss = tf.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores,
model_predictions,
_,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {
GMM.SCORES: tf.reduce_sum(losses),
}
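# A minimal usage sketch (assuming the contrib.learn-era API defined above;
# `x` is a hypothetical [n_samples, n_features] numpy array):
#
#   gmm = GMM(num_clusters=3, covariance_type='full', steps=100)
#   gmm.fit(x)
#   ids = gmm.predict(x)       # cluster id per row
#   scores = gmm.transform(x)  # per-cluster distances/scores per row
#   print(gmm.clusters())      # learned cluster centers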
|
aikramer2/spaCy
|
spacy/tests/lang/en/test_prefix_suffix_infix.py
|
Python
|
mit
| 4,124
| 0.000242
|
# coding: utf-8
"""Test that tokenizer prefixes, suffixes and infixes are handled correctly."""
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text', ["(can)"])
def test_tokenizer_splits_no_special(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["can't"])
def test_tokenizer_splits_no_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize('text', ["(can't"])
def test_tokenizer_splits_prefix_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["can't)"])
def test_tokenizer_splits_suffix_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["(can't)"])
def test_tokenizer_splits_even_wrap(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize('text', ["(can't?)"])
def test_tokenizer_splits_uneven_wrap(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 5
@pytest.mark.parametrize('text,length', [("U.S.", 1), ("us.", 2), ("(U.S.", 2)])
def test_tokenizer_splits_prefix_interact(en_tokenizer, text, length):
tokens = en_tokenizer(text)
assert len(tokens) == length
@pytest.mark.parametrize('text', ["U.S.)"])
def test_tokenizer_splits_suffix_interact(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize('text', ["(U.S.)"])
def test_tokenizer_splits_even_wrap_interact(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["(U.S.?)"])
def test_tokenizer_splits_uneven_wrap_interact(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 4
@pytest.mark.parametrize('text', ["best-known"])
def test_tokenizer_splits_hyphens(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_tokenizer_splits_numeric_range(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["best.Known", "Hello.World"])
def test_tokenizer_splits_period_infix(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize('text', ["Hello,world", "one,two"])
def test_tokenizer_splits_comma_infix(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
assert tokens[0].text == text.split(",")[0]
assert tokens[1].text == ","
assert tokens[2].text == text.split(",")[1]
@pytest.mark.parametrize('text', ["best...Known", "best...known"])
def test_tokenizer_splits_ellipsis_infix(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
def test_tokenizer_splits_double_hyphen_infix(en_tokenizer):
tokens = en_tokenizer("No decent--let alone well-bred--people.")
assert tokens[0].text == "No"
assert tokens[1].text == "decent"
assert tokens[2].text == "--"
assert tokens[3].text == "let"
assert tokens[4].text == "alone"
assert tokens[5].text == "well"
assert tokens[6].text == "-"
assert tokens[7].text == "bred"
assert tokens[8].text == "--"
assert tokens[9].text == "people"
@pytest.mark.xfail
def test_tokenizer_splits_period_abbr(en_tokenizer):
text = "Today is Tuesday.Mr."
tokens = en_tokenizer(text)
assert len(tokens) == 5
assert tokens[0].text == "Today"
assert tokens[1].text == "is"
assert tokens[2].text == "Tuesday"
assert tokens[3].text == "."
assert tokens[4].text == "Mr."
@pytest.mark.xfail
def test_tokenizer_splits_em_dash_infix(en_tokenizer):
# Re Issue #225
tokens = en_tokenizer("""Will this road take me to Puddleton?\u2014No, """
"""you'll have to walk there.\u2014Ariel.""")
assert tokens[6].text == "Puddleton"
assert tokens[7].text == "?"
assert tokens[8].text == "\u2014"
|
SciTools/iris
|
lib/iris/__init__.py
|
Python
|
lgpl-3.0
| 14,621
| 0
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A package for handling multi-dimensional data and associated metadata.
.. note ::
The Iris documentation has further usage information, including
a :ref:`user guide <user_guide_index>` which should be the first port of
call for new users.
The functions in this module provide the main way to load and/or save
your data.
The :func:`load` function provides a simple way to explore data from
the interactive Python prompt. It will convert the source data into
:class:`Cubes <iris.cube.Cube>`, and combine those cubes into
higher-dimensional cubes where possible.
The :func:`load_cube` and :func:`load_cubes` functions are similar to
:func:`load`, but they raise an exception if the number of cubes is not
what was expected. They are more useful in scripts, where they can
provide an early sanity check on incoming data.
The :func:`load_raw` function is provided for those occasions where the
automatic combination of cubes into higher-dimensional cubes is
undesirable. However, it is intended as a tool of last resort! If you
experience a problem with the automatic combination process then please
raise an issue with the Iris developers.
To persist a cube to the file-system, use the :func:`save` function.
All the load functions share very similar arguments:
* uris:
Either a single filename/URI expressed as a string or
:class:`pathlib.PurePath`, or an iterable of filenames/URIs.
Filenames can contain `~` or `~user` abbreviations, and/or
Unix shell-style wildcards (e.g. `*` and `?`). See the
standard library function :func:`os.path.expanduser` and
module :mod:`fnmatch` for more details.
.. warning::
If supplying a URL, only OPeNDAP Data Sources are supported.
* constraints:
Either a single constraint, or an iterable of constraints.
Each constraint can be either a string, an instance of
:class:`iris.Constraint`, or an instance of
:class:`iris.AttributeConstraint`. If the constraint is a string
it will be used to match against cube.name().
.. _constraint_egs:
For example::
# Load air temperature data.
load_cube(uri, 'air_temperature')
# Load data with a specific model level number.
load_cube(uri, iris.Constraint(model_level_number=1))
# Load data with a specific STASH code.
load_cube(uri, iris.AttributeConstraint(STASH='m01s00i004'))
* callback:
A function to add metadata from the originating field and/or URI which
obeys the following rules:
1. Function signature must be: ``(cube, field, filename)``.
2. Modifies the given cube inplace, unless a new cube is
returned by the function.
3. If the cube is to be rejected the callback must raise
an :class:`iris.exceptions.IgnoreCubeException`.
For example::
def callback(cube, field, filename):
# Extract ID from filenames given as: <prefix>__<exp_id>
experiment_id = filename.split('__')[1]
experiment_coord = iris.coords.AuxCoord(
experiment_id, long_name='experiment_id')
cube.add_aux_coord(experiment_coord)
"""
import contextlib
import glob
import itertools
import os.path
import pathlib
import threading
import iris._constraints
from iris._deprecation import IrisDeprecation, warn_deprecated
import iris.config
import iris.io
try:
import iris_sample_data
except ImportError:
iris_sample_data = None
# Iris revision.
__version__ = "3.3.dev0"
# Restrict the names imported when using "from iris import *"
__all__ = [
"AttributeConstraint",
"Constraint",
"FUTURE",
"Future",
"IrisDeprecation",
"NameConstraint",
"load",
"load_cube",
"load_cubes",
"load_raw",
"sample_data_path",
"save",
"site_configuration",
]
Constraint = iris._constraints.Constraint
AttributeConstraint = iris._constraints.AttributeConstraint
NameConstraint = iris._constraints.NameConstraint
class Future(threading.local):
"""Run-time configuration controller."""
def __init__(self):
"""
A container for run-time options controls.
To adjust the values simply update the relevant attribute from
within your code. For example::
iris.FUTURE.example_future_flag = False
If Iris code is executed with multiple threads, note the values of
these options are thread-specific.
.. note::
iris.FUTURE.example_future_flag does not exist. It is provided
as an example because there are currently no flags in
iris.Future.
"""
# The flag 'example_future_flag' is provided as a future reference
# for the structure of this class.
#
# self.__dict__['example_future_flag'] = example_future_flag
pass
def __repr__(self):
# msg = ('Future(example_future_flag={})')
# return msg.format(self.example_future_flag)
msg = "Future()"
return msg.format()
# deprecated_options = {'example_future_flag': 'warning',}
deprecated_options = {}
def __setattr__(self, name, value):
if name in self.deprecated_options:
level = self.deprecated_options[name]
if level == "error" and not value:
emsg = (
"setting the 'Future' property {prop!r} has been "
"deprecated to be removed in a future release, and "
"deprecated {prop!r} behaviour has been removed. "
"Please remove code that sets this property."
)
raise AttributeError(emsg.format(prop=name))
else:
msg = (
"setting the 'Future' property {!r} is deprecated "
"and will be removed in a future release. "
"Please remove code that sets this property."
)
warn_deprecated(msg.format(name))
if name not in self.__dict__:
msg = "'Future' object has no attribute {!r}".format(name)
raise AttributeError(msg)
self.__dict__[name] = value
@contextlib.contextmanager
def context(self, **kwargs):
"""
Return a context manager which allows temporary modification of
the option values for the active thread.
        On entry to the `with` statement, all keyword arguments are
        applied to the Future object. On exit from the `with`
statement, the previous state is restored.
For example::
with iris.FUTURE.context(example_future_flag=False):
# ... code that expects some past behaviour
.. note::
iris.FUTURE.example_future_flag does not exist and is
provided only as an example since there are currently no
flags in Future.
"""
# Save the current context
current_state = self.__dict__.copy()
# Update the state
for name, value in kwargs.items():
setattr(self, name, value)
try:
yield
finally:
# Return the state
self.__dict__.clear()
self.__dict__.update(current_state)
#: Object containing all the Iris run-time options.
FUTURE = Future()
# Initialise the site configuration dictionary.
#: Iris site configuration dictionary.
site_configuration = {}
try:
from iris.site_config import update as _update
except ImportError:
pass
else:
_update(site_configuration)
def _generate_cubes(uris, callback, constraints):
"""Returns a generator of cubes given the URIs and a callback."""
if isinstance(uris, (str, pathlib.PurePath)):
uris = [uris]
# Group collections of uris by their iris handler
# Create list of tuples relating schemes
|
endlessm/chromium-browser
|
third_party/depot_tools/recipes/recipe_modules/git/examples/full.py
|
Python
|
bsd-3-clause
| 5,942
| 0.009424
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'recipe_engine/buildbucket',
'recipe_engine/context',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'git',
]
def RunSteps(api):
url = 'https://chromium.googlesource.com/chromium/src.git'
# git.checkout can optionally dump GIT_CURL_VERBOSE traces to a log file,
# useful for debugging git access issues that are reproducible only on bots.
curl_trace_file = None
if api.properties.get('use_curl_trace'):
curl_trace_file = api.path['start_dir'].join('curl_trace.log')
submodule_update_force = api.properties.get('submodule_update_force', False)
submodule_update_recursive = api.properties.get('submodule_update_recursive',
True)
# You can use api.git.checkout to perform all the steps of a safe checkout.
revision = (api.buildbucket.gitiles_commit.ref or
api.buildbucket.gitiles_commit.id)
retVal = api.git.checkout(
url,
ref=revision,
recursive=True,
submodule_update_force=submodule_update_force,
set_got_revision=api.properties.get('set_got_revision'),
curl_trace_file=curl_trace_file,
remote_name=api.properties.get('remote_name'),
display_fetch_size=api.properties.get('display_fetch_size'),
file_name=api.properties.get('checkout_file_name'),
submodule_update_recursive=submodule_update_recursive,
use_git_cache=api.properties.get('use_git_cache'),
tags=api.properties.get('tags'))
assert retVal == "deadbeef", (
"expected retVal to be %r but was %r" % ("deadbeef", retVal))
# count_objects shows number and size of objects in .git dir.
api.git.count_objects(
name='count-objects',
can_fail_build=api.properties.get('count_objects_can_fail_build'),
git_config_options={'foo': 'bar'})
# Get the remote URL.
api.git.get_remote_url(
step_test_data=lambda: api.raw_io.test_api.stream_output('foo'))
api.git.get_timestamp(test_data='foo')
# You can use api.git.fetc
|
h_tags to fetch all tags from the remote
api.git.fetch_tags(api.properties.get('remote_name'))
  # If you need to run more arbitrary git commands, you can use api.git itself,
# which behaves like api.step(), but automatically sets the name of the step.
with api.context(cwd=api.path['checkout']):
api.git('status')
api.git('status', name='git status can_fail_build',
can_fail_build=True)
api.git('status', name='git status cannot_fail_build',
can_fail_build=False)
# You should run git new-branch before you upload something with git cl.
api.git.new_branch('refactor') # Upstream is origin/master by default.
# And use upstream kwarg to set up different upstream for tracking.
api.git.new_branch('feature', upstream='refactor')
# You can use api.git.rebase to rebase the current branch onto another one
api.git.rebase(name_prefix='my repo', branch='origin/master',
dir_path=api.path['checkout'],
remote_name=api.properties.get('remote_name'))
if api.properties.get('cat_file', None):
step_result = api.git.cat_file_at_commit(api.properties['cat_file'],
revision,
stdout=api.raw_io.output())
if 'TestOutput' in step_result.stdout:
pass # Success!
# Bundle the repository.
api.git.bundle_create(
api.path['start_dir'].join('all.bundle'))
def GenTests(api):
yield api.test('basic')
yield api.test('basic_tags') + api.properties(tags=True)
yield api.test('basic_ref') + api.buildbucket.ci_build(git_ref='refs/foo/bar')
yield api.test('basic_branch') + api.buildbucket.ci_build(
git_ref='refs/heads/testing')
yield api.test('basic_hash') + api.buildbucket.ci_build(
revision='abcdef0123456789abcdef0123456789abcdef01', git_ref=None)
yield api.test('basic_file_name') + api.properties(checkout_file_name='DEPS')
yield api.test('basic_submodule_update_force') + api.properties(
submodule_update_force=True)
yield api.test('platform_win') + api.platform.name('win')
yield (
api.test('curl_trace_file') +
api.properties(use_curl_trace=True) +
api.buildbucket.ci_build(git_ref='refs/foo/bar')
)
yield (
api.test('can_fail_build') +
api.step_data('git status can_fail_build', retcode=1)
)
yield (
api.test('cannot_fail_build') +
api.step_data('git status cannot_fail_build', retcode=1)
)
yield (
api.test('set_got_revision') +
api.properties(set_got_revision=True)
)
yield (
api.test('rebase_failed') +
api.step_data('my repo rebase', retcode=1)
)
yield api.test('remote_not_origin') + api.properties(remote_name='not_origin')
yield (
api.test('count-objects_delta') +
api.properties(display_fetch_size=True))
yield (
api.test('count-objects_failed') +
api.step_data('count-objects', retcode=1))
yield (
api.test('count-objects_with_bad_output') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))))
yield (
api.test('count-objects_with_bad_output_fails_build') +
api.step_data(
'count-objects',
stdout=api.raw_io.output(api.git.count_objects_output('xxx'))) +
api.properties(count_objects_can_fail_build=True))
yield (
api.test('cat-file_test') +
api.step_data('git cat-file abcdef12345:TestFile',
stdout=api.raw_io.output('TestOutput')) +
api.buildbucket.ci_build(revision='abcdef12345', git_ref=None) +
api.properties(cat_file='TestFile'))
yield (
api.test('git-cache-checkout') +
api.properties(use_git_cache=True))
|
CYBAI/servo
|
python/mach/setup.py
|
Python
|
mpl-2.0
| 1,204
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
try:
    from setuptools import setup
except ImportError:
from distutils.core import setup
VERSION = '1.0.0'
README = open('README.rst').read()
setup(
name='mach',
description='Generic command line command dispatching framework.',
long_description=README,
license='MPL 2.0',
    author='Gregory Szorc',
author_email='gregory.szorc@gmail.com',
url='https://developer.mozilla.org/en-US/docs/Developer_Guide/mach',
packages=['mach', 'mach.mixin'],
version=VERSION,
classifiers=[
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
install_requires=[
'blessings',
'mozfile',
'mozprocess',
'six',
],
tests_require=['mock'],
)
|
rodriguesrl/reddit-clone-udemy
|
accounts/urls.py
|
Python
|
mit
| 366
| 0.002732
|
from django.contrib.auth import views as auth_views
from django.conf.urls import url
from . import views
app_name = "accounts"
urlpatterns = [
url(r'^login/$', auth_views.login, {'template_name': 'accounts/signin.html'}, name='signin'),
    url(r'^signup/', views.SignUpView.as_view(),
        name="signup"),
url(r'^logout/', auth_views.logout, name="logout"),
]
|