repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
lsaffre/blog
|
docs/blog/2011/0527.py
|
Python
|
agpl-3.0
| 806
| 0.017677
|
# -*- coding: UTF-8 -*-
import logging
unicode_string = u"Татьяна"
utf8_string = "'Татьяна' is an invalid string value"
logging.warning(unicode_string)
logging.warning(utf8_string)
try:
raise Exception(utf8_string)
except Exception,e:
print "--- (Log a traceback of the exception):"
logging.exception(e)
print "--- Everything okay until here, but now we run into trouble:"
logging.warning(u"1 Deferred %s : %s",unicode_string,e)
logging.warning(u"2 Deferred %s : %s",unicode_string,utf8_string)
print "--- some workarounds:"
logging.warning(u"3 Deferred %s : %s",unicode_string,utf8_string.decode('UTF-8')
|
)
from django.utils.encoding
|
import force_unicode
logging.warning(u"4 Deferred %s : %s",unicode_string,force_unicode(utf8_string))
|
AccelAI/accel.ai
|
flask-aws/lib/python2.7/site-packages/blessed/sequences.py
|
Python
|
mit
| 26,038
| 0.000154
|
# encoding: utf-8
" This sub-module provides 'sequence awareness' for blessed."
__author__ = 'Jeff Quast <contact@jeffquast.com>'
__license__ = 'MIT
|
'
__all__ = ('init_sequence_patterns', 'Sequence', 'SequenceTextWrapper',)
# built-ins
import functools
import textwrap
import warnings
import math
import sys
import re
# local
from ._binterms import binary_terminals as _BINTERM_UNSUPPORTED
# 3rd-party
import wcwidth # https://github.com
|
/jquast/wcwidth
_BINTERM_UNSUPPORTED_MSG = (
u"Terminal kind {0!r} contains binary-packed capabilities, blessed "
u"is likely to fail to measure the length of its sequences.")
if sys.version_info[0] == 3:
text_type = str
else:
text_type = unicode # noqa
def _merge_sequences(inp):
"""Merge a list of input sequence patterns for use in a regular expression.
Order by lengthyness (full sequence set precedent over subset),
and exclude any empty (u'') sequences.
"""
return sorted(list(filter(None, inp)), key=len, reverse=True)
def _build_numeric_capability(term, cap, optional=False,
base_num=99, nparams=1):
""" Build regexp from capabilities having matching numeric
parameter contained within termcap value: n->(\d+).
"""
_cap = getattr(term, cap)
opt = '?' if optional else ''
if _cap:
args = (base_num,) * nparams
cap_re = re.escape(_cap(*args))
for num in range(base_num - 1, base_num + 2):
# search for matching ascii, n-1 through n+1
if str(num) in cap_re:
# modify & return n to matching digit expression
cap_re = cap_re.replace(str(num), r'(\d+)%s' % (opt,))
return cap_re
warnings.warn('Unknown parameter in %r (%r, %r)' % (cap, _cap, cap_re))
return None # no such capability
def _build_any_numeric_capability(term, cap, num=99, nparams=1):
""" Build regexp from capabilities having *any* digit parameters
(substitute matching \d with pattern \d and return).
"""
_cap = getattr(term, cap)
if _cap:
cap_re = re.escape(_cap(*((num,) * nparams)))
cap_re = re.sub('(\d+)', r'(\d+)', cap_re)
if r'(\d+)' in cap_re:
return cap_re
warnings.warn('Missing numerics in %r, %r' % (cap, cap_re))
return None # no such capability
def get_movement_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
to cause movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
return set([
# carriage_return
re.escape(term.cr),
# column_address: Horizontal position, absolute
bnc(cap='hpa'),
# row_address: Vertical position #1 absolute
bnc(cap='vpa'),
# cursor_address: Move to row #1 columns #2
bnc(cap='cup', nparams=2),
# cursor_down: Down one line
re.escape(term.cud1),
# cursor_home: Home cursor (if no cup)
re.escape(term.home),
# cursor_left: Move left one space
re.escape(term.cub1),
# cursor_right: Non-destructive space (move right one space)
re.escape(term.cuf1),
# cursor_up: Up one line
re.escape(term.cuu1),
# param_down_cursor: Down #1 lines
bnc(cap='cud', optional=True),
# restore_cursor: Restore cursor to position of last save_cursor
re.escape(term.rc),
# clear_screen: clear screen and home cursor
re.escape(term.clear),
# enter/exit_fullscreen: switch to alternate screen buffer
re.escape(term.enter_fullscreen),
re.escape(term.exit_fullscreen),
# forward cursor
term._cuf,
# backward cursor
term._cub,
])
def get_wontmove_sequence_patterns(term):
""" Build and return set of regexp for capabilities of ``term`` known
not to cause any movement.
"""
bnc = functools.partial(_build_numeric_capability, term)
bna = functools.partial(_build_any_numeric_capability, term)
return list([
# print_screen: Print contents of screen
re.escape(term.mc0),
# prtr_off: Turn off printer
re.escape(term.mc4),
# prtr_on: Turn on printer
re.escape(term.mc5),
# save_cursor: Save current cursor position (P)
re.escape(term.sc),
# set_tab: Set a tab in every row, current columns
re.escape(term.hts),
# enter_bold_mode: Turn on bold (extra bright) mode
re.escape(term.bold),
# enter_standout_mode
re.escape(term.standout),
# enter_subscript_mode
re.escape(term.subscript),
# enter_superscript_mode
re.escape(term.superscript),
# enter_underline_mode: Begin underline mode
re.escape(term.underline),
# enter_blink_mode: Turn on blinking
re.escape(term.blink),
# enter_dim_mode: Turn on half-bright mode
re.escape(term.dim),
# cursor_invisible: Make cursor invisible
re.escape(term.civis),
# cursor_visible: Make cursor very visible
re.escape(term.cvvis),
# cursor_normal: Make cursor appear normal (undo civis/cvvis)
re.escape(term.cnorm),
# clear_all_tabs: Clear all tab stops
re.escape(term.tbc),
# change_scroll_region: Change region to line #1 to line #2
bnc(cap='csr', nparams=2),
# clr_bol: Clear to beginning of line
re.escape(term.el1),
# clr_eol: Clear to end of line
re.escape(term.el),
# clr_eos: Clear to end of screen
re.escape(term.clear_eos),
# delete_character: Delete character
re.escape(term.dch1),
# delete_line: Delete line (P*)
re.escape(term.dl1),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# insert_line: Insert line (P*)
re.escape(term.il1),
# parm_dch: Delete #1 characters
bnc(cap='dch'),
# parm_delete_line: Delete #1 lines
bnc(cap='dl'),
# exit_alt_charset_mode: End alternate character set (P)
re.escape(term.rmacs),
# exit_am_mode: Turn off automatic margins
re.escape(term.rmam),
# exit_attribute_mode: Turn off all attributes
re.escape(term.sgr0),
# exit_ca_mode: Strings to end programs using cup
re.escape(term.rmcup),
# exit_insert_mode: Exit insert mode
re.escape(term.rmir),
# exit_standout_mode: Exit standout mode
re.escape(term.rmso),
# exit_underline_mode: Exit underline mode
re.escape(term.rmul),
# flash_hook: Flash switch hook
re.escape(term.hook),
# flash_screen: Visible bell (may not move cursor)
re.escape(term.flash),
# keypad_local: Leave 'keyboard_transmit' mode
re.escape(term.rmkx),
# keypad_xmit: Enter 'keyboard_transmit' mode
re.escape(term.smkx),
# meta_off: Turn off meta mode
re.escape(term.rmm),
# meta_on: Turn on meta mode (8th-bit on)
re.escape(term.smm),
# orig_pair: Set default pair to its original value
re.escape(term.op),
# parm_ich: Insert #1 characters
bnc(cap='ich'),
# parm_index: Scroll forward #1
bnc(cap='indn'),
# parm_insert_line: Insert #1 lines
bnc(cap='il'),
# erase_chars: Erase #1 characters
bnc(cap='ech'),
# parm_rindex: Scroll back #1 lines
bnc(cap='rin'),
# parm_up_cursor: Up #1 lines
bnc(cap='cuu'),
# scroll_forward: Scroll text up (P)
re.escape(term.ind),
# scroll_reverse: Scroll text down (P)
re.escape(term.rev),
# tab: Tab to next 8-space hardware tab stop
re.escape(term.ht),
# set_a_background: Set background color to #1, using ANSI escape
bna(cap='setab', num=1),
bna(cap='setab', num=(term.number_of_colors - 1)),
# set_a_foreground: Set foreground color to #1, using ANSI escape
bna(cap='setaf', num=1),
bna(cap='setaf', num=(term.number_of_colors - 1)),
|
stxnext-kindergarten/presence-analyzer-agrochowski
|
src/presence_analyzer/__init__.py
|
Python
|
mit
| 97
| 0
|
# -*- coding: utf-8 -*-
from .main import app
from . im
|
port views
"""
Initial
|
ize the package
"""
|
ntt-sic/nova
|
nova/tests/api/openstack/compute/contrib/test_certificates.py
|
Python
|
apache-2.0
| 2,810
| 0
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the s
|
pecific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.contrib import certificates
from nova import context
from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
def f
|
ake_get_root_cert(context, *args, **kwargs):
return 'fakeroot'
def fake_create_cert(context, *args, **kwargs):
return 'fakepk', 'fakecert'
class CertificatesTest(test.NoDBTestCase):
def setUp(self):
super(CertificatesTest, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.controller = certificates.CertificatesController()
def test_translate_certificate_view(self):
pk, cert = fake_create_cert(self.context)
view = certificates._translate_certificate_view(cert, pk)
self.assertEqual(view['data'], cert)
self.assertEqual(view['private_key'], pk)
def test_certificates_show_root(self):
self.stubs.Set(rpc, 'call', fake_get_root_cert)
req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root')
res_dict = self.controller.show(req, 'root')
cert = fake_get_root_cert(self.context)
response = {'certificate': {'data': cert, 'private_key': None}}
self.assertEqual(res_dict, response)
def test_certificates_create_certificate(self):
self.stubs.Set(rpc, 'call', fake_create_cert)
req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/')
res_dict = self.controller.create(req)
pk, cert = fake_create_cert(self.context)
response = {'certificate': {'data': cert, 'private_key': pk}}
self.assertEqual(res_dict, response)
class CertificatesSerializerTest(test.NoDBTestCase):
def test_index_serializer(self):
serializer = certificates.CertificateTemplate()
text = serializer.serialize(dict(
certificate=dict(
data='fakecert',
private_key='fakepk'),
))
tree = etree.fromstring(text)
self.assertEqual('certificate', tree.tag)
self.assertEqual('fakepk', tree.get('private_key'))
self.assertEqual('fakecert', tree.get('data'))
|
iksws/GnomeTurboNoteExtension
|
turbonote-adds/notestyle.py
|
Python
|
gpl-3.0
| 10,601
| 0.016602
|
from gi.repository import Gtk, Gdk,GObject,Pango
import commands
import time
import sys,os
import threading
import sqlite3
from config_note import Config
config_note = Config()
path = "/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/turbonote-adds/"
path_icon = "/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/"
stay = ""
connb = sqlite3.connect(path + 'turbo.db')
a = connb.cursor()
a.execute("SELECT * FROM notestyle")
rows = a.fetchall()
f1 = (str(rows[0][0]))
f2 = (str(rows[0][1]))
f3 = (str(rows[0][2]))
f4 = (str(rows[0][3]))
f5 = str(rows[0][4])
f6 = str(rows[0][5])
connb.close()
def setF1(f):
global f1
f1 = f
def setF2(f):
global f2
f2 = f
def setF3(f):
global f3
f3 = f
def setF4(f):
global f4
f4 = f
def setF5(f):
global f5
f5 = f
def setF6(f):
global f6
f6 = f
class WindowStyle(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="SVN UPDATE")
self.set_default_size(520, 400)
self.set_border_width(15)
self.set_position(Gtk.WindowPosition.CENTER)
self.set_resizable( False )
hb = Gtk.HeaderBar()
hb.props.show_close_button = True
hb.props.title = "NOTE STYLE FOR WINDOWS VIEW"
self.set_titlebar(hb)
self.grid = Gtk.Grid()
self.add(self.grid)
self.space = Gtk.Label()
self.space.set_text(" ")
self.space2 = Gtk.Label()
self.space2.set_text(" ")
self.space3 = Gtk.Label()
self.space3.set_text(" ")
self.space4 = Gtk.Labe
|
l()
sel
|
f.space4.set_text(" ")
self.space5 = Gtk.Label()
self.space5.set_text(" ")
self.title_body = Gtk.Label()
self.title_body.set_text("Body Components")
self.title_title = Gtk.Label()
self.title_title.set_text("Title Components")
self.noteTextLabel = Gtk.Label("\n\n\n\n\n Select font for text note... \n\n\n\n\n")
self.noteTextTitle = Gtk.Label(" Note Title... ")
fontbt = Gtk.Button()
fontbt.set_tooltip_text("Body font")
fontbt.connect("clicked", self.on_clickedTextFont)
fontcolorbt = Gtk.Button()
fontcolorbt.set_tooltip_text("Text body color")
fontcolorbt.connect("clicked", self.on_clickedTextColor)
fontbtTitle = Gtk.Button()
fontbtTitle.set_tooltip_text("Font title")
fontbtTitle.connect("clicked", self.on_clickedTextFontTitle)
fontcolorbtTitle = Gtk.Button()
fontcolorbtTitle.set_tooltip_text("title text color")
fontcolorbtTitle.connect("clicked", self.on_clickedTextColorTitle)
bodyColor = Gtk.Button()
bodyColor.set_tooltip_text("Body Color")
bodyColor.connect("clicked", self.on_clickedTextColorBody)
bodytitleColor = Gtk.Button()
bodytitleColor.set_tooltip_text("Title color")
bodytitleColor.connect("clicked", self.on_clickedTextColorTitleBody)
save = Gtk.Button()
save.set_tooltip_text("Save Config")
save.connect("clicked", self.on_save)
self.colorBody = Gtk.Image()
self.colorBody.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventb" + config_note.getColor() + ".png")
bodyColor.add(self.colorBody)
self.colorTextBody = Gtk.Image()
self.colorTextBody.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventtb" + config_note.getColor() + ".png")
fontcolorbt.add(self.colorTextBody)
self.fontTextBody = Gtk.Image()
self.fontTextBody.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventt" + config_note.getColor() + ".png")
fontbt.add(self.fontTextBody)
self.colorBodyTitle = Gtk.Image()
self.colorBodyTitle.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventb" + config_note.getColor() + ".png")
fontcolorbtTitle.add(self.colorBodyTitle)
self.colorTextBodyTitle = Gtk.Image()
self.colorTextBodyTitle.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventtb" + config_note.getColor() + ".png")
bodytitleColor.add(self.colorTextBodyTitle)
self.fontTextBodyTitle = Gtk.Image()
self.fontTextBodyTitle.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_new_eventt" + config_note.getColor() + ".png")
fontbtTitle.add(self.fontTextBodyTitle)
self.saveimg = Gtk.Image()
self.saveimg.set_from_file("/usr/share/gnome-shell/extensions/turbonote@iksws.com.br/icons/ic_action_save" + config_note.getColor() + ".png")
save.add(self.saveimg)
self.grid.attach(self.title_body, 0,0 , 3 , 1)
self.grid.attach(self.space2, 0,1 , 1 , 1)
self.grid.attach(bodyColor ,0,2 , 1 , 1)
self.grid.attach(fontcolorbt ,1,2 , 1 , 1)
self.grid.attach(fontbt ,2,2 , 1 , 1)
self.grid.attach(self.space, 3,2 , 1 , 3)
self.grid.attach(self.noteTextTitle, 4,0 , 1 , 2)
self.grid.attach(self.noteTextLabel, 4,1 , 1 , 8)
self.grid.attach(self.space3, 0,3 , 3 , 1)
self.grid.attach(self.title_title, 0,4 , 3 , 1)
self.grid.attach(self.space4, 0,5 , 3 , 1)
self.grid.attach(fontbtTitle, 2,6 , 1 , 1)
self.grid.attach(bodytitleColor, 1,6 , 1 , 1)
self.grid.attach(fontcolorbtTitle, 0,6, 1 , 1)
self.grid.attach(self.space5, 0,7 , 3 , 1)
self.grid.attach(save, 0,8 , 3 , 1)
font1 = Gdk.RGBA()
font2 = Gdk.RGBA()
font3 = Gdk.RGBA()
font4 = Gdk.RGBA()
connb = sqlite3.connect(path + 'turbo.db')
a = connb.cursor()
a.execute("SELECT * FROM notestyle")
rows = a.fetchall()
font1.parse(str(rows[0][0]))
font2.parse(str(rows[0][1]))
font3.parse(str(rows[0][2]))
font4.parse(str(rows[0][3]))
fontbt = str(rows[0][4])
fontbb = str(rows[0][5])
connb.close()
self.noteTextTitle.override_color(Gtk.StateFlags.NORMAL, font3)
self.noteTextTitle.override_background_color(Gtk.StateFlags.NORMAL, font1)
self.noteTextLabel.override_color(Gtk.StateFlags.NORMAL, font4)
self.noteTextLabel.override_background_color(Gtk.StateFlags.NORMAL, font2)
self.noteTextTitle.modify_font(Pango.FontDescription(fontbt))
self.noteTextLabel.modify_font(Pango.FontDescription(fontbb))
def rgb_to_hex(self,rgb):
return '#%02x%02x%02x' % rgb
def on_clickedTextColorTitleBody(self, widget):
cdia = Gtk.ColorSelectionDialog("Select color")
response = cdia.run()
if response == Gtk.ResponseType.OK:
colorsel = cdia.get_color_selection()
rgb = colorsel.get_current_rgba().to_string()
rgb = rgb.replace("rgb","").replace("(","").replace(")","").split(',')
setF1(self.rgb_to_hex((int(rgb[0]), int(rgb[1]), int(rgb[2]))))
self.noteTextTitle.override_background_color(Gtk.StateFlags.NORMAL, colorsel.get_current_rgba())
cdia.destroy()
def on_save(self, widget):
connb = sqlite3.connect(path + 'turbo.db')
a = connb.cursor()
a.execute("UPDATE notestyle SET titulo_color ='" + f1 +"',body_color='" + f2 + "',titulo_font_color ='" + f3 + "',body_font_color ='" + f4 + "',titulo_font_type='" + f5 + "',body_font_type = '" + f6 + "' where 1=1;")
connb.commit()
connb.close()
def on_clickedTextColorBody(self, widget):
cdia = Gtk.ColorSelectionDialog("Select color")
response = cdia.run()
if response == Gtk.ResponseType.OK:
colorsel = cdia.get_col
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/google/protobuf/any_pb2.py
|
Python
|
bsd-2-clause
| 2,652
| 0.006787
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/any.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/any.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42o\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z%github.com/golang/protobuf/ptypes/any\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_ANY = _descriptor.Descriptor(
name='Any',
full_name='google.protobuf.Any',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_url', full_name='google.protobuf.Any.type_u
|
rl', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containi
|
ng_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Any.value', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=84,
)
DESCRIPTOR.message_types_by_name['Any'] = _ANY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Any = _reflection.GeneratedProtocolMessageType('Any', (_message.Message,), dict(
DESCRIPTOR = _ANY,
__module__ = 'google.protobuf.any_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Any)
))
_sym_db.RegisterMessage(Any)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\010AnyProtoP\001Z%github.com/golang/protobuf/ptypes/any\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope)
|
bartscheers/tkp
|
tkp/accessors/aartfaaccasaimage.py
|
Python
|
bsd-2-clause
| 1,347
| 0
|
import logging
from tkp.accessors import CasaImage
logger = logging.getLogger(__name__)
class AartfaacCasaImage(CasaImage):
def __init__(self, url, plane=0, beam=None):
super(AartfaacCasaImage, self).__init__(url, plane=0, beam=None)
self.taustart_ts = self.parse_taustartts()
self.telescope = self.table.getkeyword('coords')['telescope']
# TODO: header does't contain integration time
# aartfaac imaginig pipeline issue #25
self.tau_time = 1
def parse_frequency(self):
"""
Extract frequency related information from headers
(Overrides the implementation in CasaImage, which pulls the entries
from the 'spectral2' sub-table.)
"""
keywords = self.table.getkeywords()
# due to some undocumented casacore feature, the 'spectral' keyword
# changes from spectral1 to spectral2 when AARTFAAC imaging developers
# change
|
d some of the header information. For now we will try both
# locations.
if 'spectral1' in keywords['coords']:
keyword = 'spectral1'
|
if 'spectral2' in keywords['coords']:
keyword = 'spectral2'
freq_eff = keywords['coords'][keyword]['restfreq']
freq_bw = keywords['coords'][keyword]['wcs']['cdelt']
return freq_eff, freq_bw
|
TheWardoctor/Wardoctors-repo
|
plugin.video.salts/scrapers/xmovies8_scraper.py
|
Python
|
apache-2.0
| 5,391
| 0.006678
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants
|
import VIDEO_TYPES
from salts_lib.constants import XHR
import scraper
VIDEO_URL = '/video_info/iframe'
class Scraper(scraper.Scraper):
OPTIONS = ['h
|
ttps://xmovies8.org', 'https://putlockerhd.co', 'https://afdah.org', 'https://watch32hd.co']
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'xmovies8'
def resolve_link(self, link):
link = link.split('|', 1)[0]
html = self._http_get(link, allow_redirect=False, method='HEAD', cache_limit=0)
if html.startswith('http'):
return html
else:
return link
def get_sources(self, video):
hosters = []
source_url = self.get_url(video)
if not source_url or source_url == FORCE_NO_MATCH: return hosters
page_url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=0)
match = re.search('var\s*video_id\s*=\s*"([^"]+)', html)
if not match: return hosters
video_id = match.group(1)
headers = {'Referer': page_url}
headers.update(XHR)
_html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
vid_url = scraper_utils.urljoin(self.base_url, VIDEO_URL)
html = self._http_get(vid_url, data={'v': video_id}, headers=headers, cache_limit=0)
for source, value in scraper_utils.parse_json(html, vid_url).iteritems():
match = re.search('url=(.*)', value)
if not match: continue
stream_url = urllib.unquote(match.group(1))
host = scraper_utils.get_direct_hostname(self, stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(source)
stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/results')
params = {'q': title}
referer = search_url + '?' + urllib.urlencode(params)
headers = {'Referer': referer}
headers.update(XHR)
_html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
cookies = {'begin_referer': referer, 'prounder': 1}
html = self._http_get(search_url, params=params, cookies=cookies, cache_limit=8)
if any('jquery.js' in match.attrs['src'] for match in dom_parser2.parse_dom(html, 'script', req='src')):
html = self._http_get(search_url, params=params, cookies=cookies, cache_limit=0)
for _attrs, result in dom_parser2.parse_dom(html, 'div', {'class': 'cell'}):
title_frag = dom_parser2.parse_dom(result, 'div', {'class': 'video_title'})
year_frag = dom_parser2.parse_dom(result, 'div', {'class': 'video_quality'})
if not title_frag: continue
match = dom_parser2.parse_dom(title_frag[0].content, 'a', req='href')
if not match: continue
match_url = match[0].attrs['href']
match_title = match[0].content
try:
match = re.search('\s+(\d{4})\s+', year_frag[0].content)
match_year = match.group(1)
except:
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
@classmethod
def get_settings(cls):
settings = super(cls, cls).get_settings()
settings.append(' <setting id="%s-default_url" type="text" visible="false"/>' % (cls.get_name()))
return settings
scraper_utils.set_default_url(Scraper)
|
mikehulluk/morphforge
|
src/morphforgeexamples/exset2_singlecell_simulations/singlecell_simulation010.py
|
Python
|
bsd-2-clause
| 3,237
| 0.006179
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
"""The response of a single compartment neuron with leak channels to step current injection.
In this exa
|
mple, we build a single section neuron, with passive channels,
and stimulate it with a step current clamp of 200pA for 100ms starting at t=100ms.
We also create a summary pdf of the simulation.
"""
from morphforge
|
.stdimports import *
from morphforgecontrib.stdimports import StdChlLeak
# Create the morphology for the cell:
morphDict1 = {'root': {'length': 20, 'diam': 20, 'id':'soma'} }
m1 = MorphologyTree.fromDictionary(morphDict1)
# Create the environment:
env = NEURONEnvironment()
# Create the simulation:
sim = env.Simulation()
# Create a cell:
cell = sim.create_cell(name="Cell1", morphology=m1)
# Apply the mechanisms to the cells
lk_chl = env.Channel(StdChlLeak,
name="LkChl",
conductance=qty("0.25:mS/cm2"),
reversalpotential=qty("-51:mV"),
)
cell.apply_channel( lk_chl)
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('1.0:uF/cm2'))
# Create the stimulus and record the injected current:
cc = sim.create_currentclamp(name="Stim1", amp=qty("200:pA"), dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
# Define what to record:
sim.record(cell, what=StandardTags.Voltage, name="SomaVoltage", cell_location = cell.soma)
sim.recordall(lk_chl, cell_location=cell.soma)
# run the simulation
results = sim.run()
# Create an output .pdf
SimulationMRedoc.build( sim ).to_pdf(__file__ + '.pdf')
# Display the results:
TagViewer([results], figtitle="The response of a neuron to step current injection", timerange=(95, 200)*units.ms, show=True)
|
andensinlimite/metaespacio
|
metaespacio/espacios/templatetags/espacios.py
|
Python
|
agpl-3.0
| 441
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Library
from ..models import Espacio
register = Library()
@register.inclusion_tag('espacios/_otros_espacios.html', takes_context=True)
def otros_espacios(context):
qs = Espacio.objects.all()
if 'e
|
spacio' in context:
obj = context['espacio']
if obj:
qs = qs.exclude(p
|
k=obj.pk)
return {'otros_espacios': qs}
|
vLBrian/boxeehack-cigamit
|
hack/boxee/scripts/OpenSubtitles/resources/lib/gui.py
|
Python
|
mit
| 11,411
| 0.063897
|
import sys
import os
import xbmc
import xbmcgui
import xbmcplugin
import threading
import socket
import urllib
from Queue import Queue
import plugins
import ConfigParser
import logging
import difflib
try: current_dlg_id = xbmcgui.getCurrentWindowDialogId()
except: current_dlg_id = 0
current_win_id = xbmcgui.getCurrentWindowId()
_ = sys.modules[ "__main__" ].__language__
__scriptname__ = sys.modules[ "__main__" ].__scriptname__
__version__ = sys.modules[ "__main__" ].__version__
STATUS_LABEL = 100
LOADING_IMAGE = 110
SUBTITLES_LIST = 120
trans_lang = {'aa' : 'Afar',
'ab' : 'Abkhaz',
'ae' : 'Avestan',
'af' : 'Afrikaans',
'ak' : 'Akan',
'am' : 'Amharic',
'an' : 'Aragonese',
'ar' : 'Arabic',
'as' : 'Assamese',
'av' : 'Avaric',
'ay' : 'Aymara',
'az' : 'Azerbaijani',
'ba' : 'Bashkir',
'be' : 'Belarusian',
'bg' : 'Bulgarian',
'bh' : 'Bihari',
'bi' : 'Bislama',
'bm' : 'Bambara',
'bn' : 'Bengali',
'bo' : 'Tibetan',
'br' : 'Breton',
'bs' : 'Bosnian',
'ca' : 'Catalan',
'ce'
|
: 'Chechen',
'ch' : 'Chamorro',
'co' : 'Corsican',
'cr' : 'Cree',
'cs' : 'Czech',
'cu' : 'Old Church Slavonic',
'cv' : 'Chuvash',
'cy' : 'Welsh',
'da' : 'Danish',
'de' : 'German',
'dv' : 'Divehi',
'dz' : 'Dzongkha',
'ee' : 'Ewe',
'el' : 'Greek',
'en' : 'English',
'eo' : 'Esperanto',
'es' : 'Spanish'
|
,
'et' : 'Estonian',
'eu' : 'Basque',
'fa' : 'Persian',
'ff' : 'Fula',
'fi' : 'Finnish',
'fj' : 'Fijian',
'fo' : 'Faroese',
'fr' : 'French',
'fy' : 'Western Frisian',
'ga' : 'Irish',
'gd' : 'Scottish Gaelic',
'gl' : 'Galician',
'gn' : 'Guaraní',
'gu' : 'Gujarati',
'gv' : 'Manx',
'ha' : 'Hausa',
'he' : 'Hebrew',
'hi' : 'Hindi',
'ho' : 'Hiri Motu',
'hr' : 'Croatian',
'ht' : 'Haitian',
'hu' : 'Hungarian',
'hy' : 'Armenian',
'hz' : 'Herero',
'ia' : 'Interlingua',
'id' : 'Indonesian',
'ie' : 'Interlingue',
'ig' : 'Igbo',
'ii' : 'Nuosu',
'ik' : 'Inupiaq',
'io' : 'Ido',
'is' : 'Icelandic',
'it' : 'Italian',
'iu' : 'Inuktitut',
'ja' : 'Japanese (ja)',
'jv' : 'Javanese (jv)',
'ka' : 'Georgian',
'kg' : 'Kongo',
'ki' : 'Kikuyu',
'kj' : 'Kwanyama',
'kk' : 'Kazakh',
'kl' : 'Kalaallisut',
'km' : 'Khmer',
'kn' : 'Kannada',
'ko' : 'Korean',
'kr' : 'Kanuri',
'ks' : 'Kashmiri',
'ku' : 'Kurdish',
'kv' : 'Komi',
'kw' : 'Cornish',
'ky' : 'Kirghiz, Kyrgyz',
'la' : 'Latin',
'lb' : 'Luxembourgish',
'lg' : 'Luganda',
'li' : 'Limburgish',
'ln' : 'Lingala',
'lo' : 'Lao',
'lt' : 'Lithuanian',
'lu' : 'Luba-Katanga',
'lv' : 'Latvian',
'mg' : 'Malagasy',
'mh' : 'Marshallese',
'mi' : 'Maori',
'mk' : 'Macedonian',
'ml' : 'Malayalam',
'mn' : 'Mongolian',
'mr' : 'Marathi',
'ms' : 'Malay',
'mt' : 'Maltese',
'my' : 'Burmese',
'na' : 'Nauru',
'nb' : 'Norwegian',
'nd' : 'North Ndebele',
'ne' : 'Nepali',
'ng' : 'Ndonga',
'nl' : 'Dutch',
'nn' : 'Norwegian Nynorsk',
'no' : 'Norwegian',
'nr' : 'South Ndebele',
'nv' : 'Navajo, Navaho',
'ny' : 'Chichewa; Chewa; Nyanja',
'oc' : 'Occitan',
'oj' : 'Ojibwe, Ojibwa',
'om' : 'Oromo',
'or' : 'Oriya',
'os' : 'Ossetian, Ossetic',
'pa' : 'Panjabi, Punjabi',
'pi' : 'Pali',
'pl' : 'Polish',
'ps' : 'Pashto, Pushto',
'pt' : 'Portuguese',
'pb' : 'Brazilian',
'qu' : 'Quechua',
'rm' : 'Romansh',
'rn' : 'Kirundi',
'ro' : 'Romanian',
'ru' : 'Russian',
'rw' : 'Kinyarwanda',
'sa' : 'Sanskrit',
'sc' : 'Sardinian',
'sd' : 'Sindhi',
'se' : 'Northern Sami',
'sg' : 'Sango',
'si' : 'Sinhala, Sinhalese',
'sk' : 'Slovak',
'sl' : 'Slovene',
'sm' : 'Samoan',
'sn' : 'Shona',
'so' : 'Somali',
'sq' : 'Albanian',
'sr' : 'Serbian',
'ss' : 'Swati',
'st' : 'Southern Sotho',
'su' : 'Sundanese',
'sv' : 'Swedish',
'sw' : 'Swahili',
'ta' : 'Tamil',
'te' : 'Telugu',
'tg' : 'Tajik',
'th' : 'Thai',
'ti' : 'Tigrinya',
'tk' : 'Turkmen',
'tl' : 'Tagalog',
'tn' : 'Tswana',
'to' : 'Tonga',
'tr' : 'Turkish',
'ts' : 'Tsonga',
'tt' : 'Tatar',
'tw' : 'Twi',
'ty' : 'Tahitian',
'ug' : 'Uighur',
'uk' : 'Ukrainian',
'ur' : 'Urdu',
'uz' : 'Uzbek',
've' : 'Venda',
'vi' : 'Vietnamese',
'vo' : 'Volapük',
'wa' : 'Walloon',
'wo' : 'Wolof',
'xh' : 'Xhosa',
'yi' : 'Yiddish',
'yo' : 'Yoruba',
'za' : 'Zhuang, Chuang',
'zh' : 'Chinese',
'zu' : 'Zulu' }
SELECT_ITEM = ( 11, 256, 61453, )
EXIT_SCRIPT = ( 10, 247, 275, 61467, 216, 257, 61448, )
CANCEL_DIALOG = EXIT_SCRIPT + ( 216, 257, 61448, )
GET_EXCEPTION = ( 216, 260, 61448, )
SELECT_BUTTON = ( 229, 259, 261, 61453, )
MOVEMENT_UP = ( 166, 270, 61478, )
MOVEMENT_DOWN = ( 167, 271, 61480, )
DEBUG_MODE = 5
# Log status codes
LOG_INFO, LOG_ERROR, LOG_NOTICE, LOG_DEBUG = range( 1, 5 )
def LOG( status, format, *args ):
if ( DEBUG_MODE >= status ):
xbmc.output( "%s: %s\n" % ( ( "INFO", "ERROR", "NOTICE", "DEBUG", )[ status - 1 ], format % args, ) )
def sort_inner(inner):
if("hash" in inner and inner["hash"] == True):
return 100
return inner["percent"]
class GUI( xbmcgui.WindowXMLDialog ):
socket.setdefaulttimeout(10.0) #seconds
def __init__( self, *args, **kwargs ):
pass
def set_filepath( self, path ):
LOG( LOG_INFO, "set_filepath [%s]" , ( path ) )
self.file_original_path = path
self.file_path = path[path.find(os.sep):len(path)]
def set_filehash( self, hash ):
LOG( LOG_INFO, "set_filehash [%s]" , ( hash ) )
self.file_hash = hash
def set_filesize( self, size ):
LOG( LOG_INFO, "set_filesize [%s]" , ( size ) )
self.file_size = size
def set_searchstring( self, search ):
LOG( LOG_INFO, "set_searchstring [%s]" , ( search ) )
self.search_string = search
def set_type( self, type ):
self.file_type = type
def onInit( self ):
LOG( LOG_INFO, "onInit" )
self.setup_all()
if self.file_path:
self.connThread = threading.Thread( target=self.connect, args=() )
self.connThread.start()
def setup_all( self ):
self.setup_variables()
def setup_variables( self ):
self.controlId = -1
self.allow_exception = False
if xbmc.Player().isPlayingVideo():
self.set_filepath( xbmc.Player().getPlayingFile() )
def connect( self ):
self.setup_all()
logging.basicConfig()
self.getControl( LOADING_IMAGE ).setVisible( True )
self.getControl( STATUS_LABEL ).setLabel( "Searching" )
sub_filename = os.path.basename(self.file_original_path)
title = sub_filename[0:sub_filename.rfind(".")]
self.getControl( 180 ).setLabel("[B][UPPERCASE]$LOCALIZE[293]:[/B] " + title + "[/UPPERCASE]");
langs = None
subtitles = []
q = Queue()
self.config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
basepath = "/data/etc" # os.path.dirname(__file__)
self.config.read(basepath + "/.subtitles")
config_plugins = self.config.get("DEFAULT", "plugins")
if(self.file_type == "tv"):
config_plugins = self.config.get("DEFAULT", "tvplugins")
elif(self.file_type == "movie"):
config_plugins = self.config.get("DEFAULT", "movieplugins")
use_plugins = map(lambda x : x.strip(), config_plugins.split(","))
config_langs = self.config.get("DEFAULT", "lang")
if(config_langs != "All" and config_langs != ""):
use_langs = map(lambda x : x.strip(), config_langs.split(","))
else:
use_langs = None
for name in use_plugins:
filep = self.file_original_path
try :
plugin = getattr(plugins, name)(self.config, '/data/hack/cache')
LOG( LOG_INFO, "Searching on %s ", (name) )
thread = threading.Thread(target=plugin.searchInThread, args=(q, str(filep), use_langs))
thread.start()
except ImportError, (e) :
LOG( LOG_INFO, "Plugin %s is not a valid plugin name. Skipping it.", ( e) )
# Get data from the queue and wait till we have a result
count = 0
for name in use_plugins:
subs = q.get(True)
count = count + 1
self.getControl( STATUS_LABEL ).setLabel( "Searching " + str(count) + "/" + str(len(use_plugins)) )
if subs and len(subs) > 0:
if not use_langs:
subtitles += subs
else:
for sub in subs:
lang_code = sub["lang"]
if(lang_code == "p
|
libnano/libnano
|
libnano/fileio/gb_reader_b.py
|
Python
|
gpl-2.0
| 12,973
| 0.004702
|
# -*- coding: utf-8 -*-
"""
http://www.ncbi.nlm.nih.gov/Sitemap/samplerecord.html
http://www.insdc.org/documents/feature_table.html
All keys are native strings, as are values, except the origin which is always
a python 3 byte string (not unicode)
"""
import re
import io
import sys
from collections import OrderedDict
from typing import (
List,
Any
)
def _bytes(x):
return x.encode('utf8') if isinstance(x, str) else x
NEWLINE_STR: str = '\r\n' if sys.platform == 'win32' else '\n'
NEWLINE_BYT: bytes = b'\r\n' if sys.platform == 'win32' else b'\n'
def parse(filepath: str, is_ordered: bool = False) -> dict:
"""
is_ordered == True will retain the order of the qualifiers
"""
d = {b'info': {}}
d_info = d[b'info']
# with io.open(filepath, 'r', encoding='utf-8') as fd:
with io.open(filepath, 'rb') as fd:
raw = fd.read()
start, _, origin = raw.partition(b"ORIGIN")
start, _, features = start.partition(b"FEATURES Location/Qualifiers%s" % (NEWLINE_BYT))
parseLocus(start, d_info)
parseDefinition(start, d_info)
parseAccession(start, d_info)
parseVersion(start, d_info)
parseDBLink(start, d_info)
parseKeywords(start, d_info)
parseSource(start, d_info)
parseOrganism(start, d_info)
d_info[b'references'] = parseReference(start)
_, _, comment = start.partition(b"COMMENT ")
parseComment(d_info, comment)
d[b'features'] = parseFeatures(features, is_ordered)
d[b'seq'] = parseOrigin(origin)
return d
# end def
def parseComment(d: dict, comment: bytes):
if comment != b'':
# get rid of ApE empty comment
if comment.startswith(b"%sCOMMENT " % (NEWLINE_BYT)):
comment = comment[13:]
idx_genome_asm_data = -1
genome_asm_data_newline_count = 0
lines = comment.split(NEWLINE_BYT)
lines = [line.strip() for line in lines]
# print(lines)
# handle ##Genome-Assembly-Data-START## edge case
for i, line in enumerate(lines):
if line == b'':
genome_asm_data_newline_count += 1
elif genome_asm_data_newline_count == 2:
idx_genome_asm_data = i
genome_asm_data_newline_count = 0
else:
genome_asm_data_newline_count = 0
# end for
if idx_genome_asm_data < 0:
d[b'comment'] = b" ".join(lines)
else:
d[b'comment'] = [
b" ".join(lines[:idx_genome_asm_data-2]),
lines[idx_genome_asm_data:-1]
]
# end def
re_locus: List[str] = [
"^LOCUS", # field
" +(?P<name>[\w|.]+)", # name
" +(?P<length>[0-9]+) bp", # sequence length
"(?: +(?P<stranded>[a-z]{2})-)?", # opt: ss, ds, ms
" *(?P<molecule_type>[a-z|A-Z|-|]{2,6})", # molecule type
" +(?P<form>[\w]{6,8})?", # linear or circular
" +(?P<gb_division>[a-z|A-Z]{3})?", # Genbank division
" +(?P<mod_date>[0-9]+-[A-Z]+-[0-9]+)", # modification date
".*%s" % (NEWLINE_STR) # match line end
]
RE_LOCUS: bytes = _bytes("".join(re_locus))
re_definition: List[str] = [
"^DEFINITION", # field
" +(?P<definition>(?:.*%s)(?: .*%s)*)" % (NEWLINE_STR, NEWLINE_STR) # look ahead assertion for multiline
]
RE_DEFINITION: bytes = _bytes("".join(re_definition))
re_accession: List[str] = [
"^ACCESSION", # field
" +(?P<accession>[\w|.]*)" # look ahead assertion for multiline
".*", # match line end
NEWLINE_STR
]
RE_ACCESSION: bytes = _bytes("".join(re_accession))
re_versi
|
on: List[str] = [ "^VERSION", # field
" +(?P<version>[\w|.]+)", # version
" +GI:(?P<GI>[\w|.]+)" # gi field
|
".*", # match line end
NEWLINE_STR
]
RE_VERSION: bytes = _bytes("".join(re_version))
RE_DBLINK: bytes = b"^DBLINK +(?P<dblink>[\w|:| |.]+)" + NEWLINE_BYT
re_keywords: List[str] = [
"^KEYWORDS",
" +(?P<keywords>[\w|.]*)"
".*",
NEWLINE_STR
]
RE_KEYWORDS: bytes = _bytes("".join(re_keywords))
re_source: List[str] = [
"^SOURCE",
" +(?P<source>.*)",
NEWLINE_STR
]
RE_SOURCE: bytes = _bytes("".join(re_source))
re_organism: List[str] = [
"^ ORGANISM", # field
"(?: +(?P<organism0>(?:.*%s))?" % NEWLINE_STR,
"(?: +(?P<organism1>(?:.*%s)(?: .*%s)*))?)" % (NEWLINE_STR, NEWLINE_STR) # multiline
]
RE_ORGANISM: bytes = _bytes("".join(re_organism))
RE_COMP_LOCUS: '_sre.SRE_Pattern' = re.compile(RE_LOCUS, flags=re.M)
def parseLocus(raw: bytes, d_out: dict):
m = re.match(RE_COMP_LOCUS, raw)
d = m.groupdict()
d['length'] = int(d['length'])
for k, v in d.items():
d_out[_bytes(k)] = v
#end def
RE_COMP_DEFINITION: '_sre.SRE_Pattern' = re.compile(RE_DEFINITION, flags=re.M)
def parseDefinition(raw: bytes, d_out: dict):
m = re.search(RE_COMP_DEFINITION, raw)
if m is None:
d_out[b'definition'] = None
else:
d = m.groupdict()
if d['definition'] is not None:
temp_l = d['definition'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
d_out[b'definition'] = b" ".join(temp_l)[:-1]
else:
d_out[b'definition'] = None
#end def
RE_COMP_ACCESSION: '_sre.SRE_Pattern' = re.compile(RE_ACCESSION, flags=re.M)
def parseAccession(raw: bytes, d_out: dict):
m = re.search(RE_COMP_ACCESSION, raw)
if m is None:
d_out[b'accession'] = None
else:
d = m.groupdict()
d_out[b'accession'] = d['accession']
# end def
RE_COMP_VERSION: '_sre.SRE_Pattern' = re.compile(RE_VERSION, flags=re.M)
def parseVersion(raw: bytes, d_out: dict):
m = re.search(RE_COMP_VERSION, raw)
if m is None:
d_out[b'version'] = None
else:
d = m.groupdict()
d_out[b'version'] = d['version']
d_out[b'GI'] = d['GI']
# end def
RE_COMP_DBLINK: '_sre.SRE_Pattern' = re.compile(RE_DBLINK, flags=re.M)
def parseDBLink(raw: bytes, d_out: dict):
m = re.search(RE_COMP_DBLINK, raw)
if m is None:
d_out[b'dblink'] = None
else:
d = m.groupdict()
d_out[b'dblink'] = d['dblink']
# end def
RE_COMP_KEYWORDS: '_sre.SRE_Pattern' = re.compile(RE_KEYWORDS, flags=re.M)
def parseKeywords(raw: bytes, d_out: dict):
m = re.search(RE_COMP_KEYWORDS, raw)
if m is None:
d_out[b'keywords'] = None
else:
d = m.groupdict()
d_out[b'keywords'] = d['keywords']
# end def
RE_COMP_SOURCE: '_sre.SRE_Pattern' = re.compile(RE_SOURCE, flags=re.M)
def parseSource(raw: bytes, d_out: dict):
m = re.search(RE_COMP_SOURCE, raw)
if m is None:
d_out[b'source'] = None
else:
d = m.groupdict()
d_out[b'source'] = d['source']
# end def
RE_COMP_ORGANISM: '_sre.SRE_Pattern' = re.compile(RE_ORGANISM, flags=re.M)
def parseOrganism(raw: bytes, d_out: dict):
m = re.search(RE_COMP_ORGANISM, raw)
if m is None:
d_out[b'organism'] = [None, None]
else:
d = m.groupdict()
temp_l = d['organism0'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
org0 = b" ".join(temp_l)[:-1]
org1 = None
if d['organism1'] is not None:
temp_l = d['organism1'].split(NEWLINE_BYT)
temp_l = [x.strip() for x in temp_l]
org1 = b" ".join(temp_l)[:-1]
d_out[b'organism'] = [org0, org1]
# end def
"""
REFERENCE 1 (bases 1 to 5028)
AUTHORS Torpey,L.E., Gibbs,P.E., Nelson,J. and Lawrence,C.W.
TITLE Cloning and sequence of REV7, a gene whose function is required for
DNA damage-induced mutagenesis in Saccharomyces cerevisiae
JOURNAL Yeast 10 (11), 1503-1509 (1994)
PUBMED 7871890
"""
re_reference: List[str] = [
"^REFERENCE",
" +(?P<r_index>[0-9]+)(?: +\(bases (?P<start_idx>[0-9]+) to (?P<end_idx>[0-9]+)\)){0,1}",
".*",
NEWLINE_STR,
|
brianhouse/wavefarm
|
granu/braid/core.py
|
Python
|
gpl-3.0
| 2,215
| 0.005869
|
#!/usr/bin/env python3
import time, threading, queue
from .util import osc, log
class Driver(object):
""" This is a substitute for a realtime system """
def __init__(self):
self.voices = []
self.grain = 0.01 # hundredths are nailed by Granu, w/o load. ms are ignored.
self.t = 0.0
self.previous_t = 0.0
self.callbacks = []
self.running = True
def start(self, skip=0):
start_t = time.time() - skip
last_cue = -1
while self.running:
self.t = time.time() - start_t
if int(self.t) // 15 != last_cue:
last_cue = int(self.t) // 15
log.info("/////////////// [%s] %d:%f ///////////////" % (last_cue, self.t // 60.0, self.t % 60.0))
self._perform_callbacks()
if not self.running:
break
delta_t = self.t - self.previous_t
for voice in self.voices:
voice.update(delta_t)
self.previous_t = self.t
time.sleep(self.grain)
def stop(self):
self.running = False
for voice in self.voices:
voice.end()
log.info("/////////////// END %d:%f ///////////////" % (self.t // 60.0, self.t % 60.0))
time.sleep(1) # for osc to finish
def callback(self, f, t):
t += self.t
self.callbacks.append((f, t))
def _perform_callbacks(self):
for c, callback in enumerate(self.callbacks):
f, t = callback
if t <= self.t:
f()
self.callbacks.remove(callback)
class Synth(threading.Thread):
"""Consume notes and send OSC"""
def __init__(self):
threading.Thread.__init__(self)
self.daemon
|
= True
self.msp_sender = osc.Sender(5280)
self.queue = queue.Queue()
self.start()
def send(self, address, *params):
self.queue.put((address, params))
def run(self):
while True:
address, params = self.queue.get()
se
|
lf.msp_sender.send(address, params)
synth = Synth() # player singleton
driver = Driver()
|
Sbalbp/DIRAC
|
FrameworkSystem/Client/SiteMapClient.py
|
Python
|
gpl-3.0
| 1,184
| 0.033784
|
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
###########################################################################
def __init__( self, getRPCClient = None ):
self.getRPCClient = getRPCClient
self.lastDataRetrievalTime = 0
self.sitesData = {}
def __getRPCClient( self ):
if self.getRPCClient:
return self.getRPCClient( "Framework/SiteMap" )
return RPCClient( "Framework/SiteMap" )
######################################################################
|
#####
def getSitesData( self ):
""" Retrieves a single file and puts it in the output directory
"""
if self.lastDataRetrievalTime - time.time() < 300:
result = self.__getRPCClient().getSitesData()
if 'rpcStub' in result:
del( result[ 'rpcStub' ] )
if not result[ 'OK' ]:
return result
self.sitesData = result[ 'Value' ]
if self.sitesData:
self.lastDataRetrievalTime =
|
time.time()
return S_OK( self.sitesData )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
wakatime/komodo-wakatime
|
components/wakatime/offlinequeue.py
|
Python
|
bsd-3-clause
| 3,427
| 0.000875
|
# -*- coding: utf-8 -*-
"""
wakatime.offlinequeue
~~~~~~~~~~~~~~~~~~~~~
Queue for saving heartbeats while offline.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
from time import sleep
from .compat import json
from .constants import DEFAULT_SYNC_OFFLINE_ACTIVITY, HEARTBEATS_PER_REQUEST
from .heartbeat import Heartbeat
try:
import sqlite3
HAS_SQL = True
except ImportError: # pragma: nocover
HAS_SQL = False
log = logging.getLogger('WakaTime')
class Queue(object):
db_file = '.wakatime.db'
table_name = 'heartbeat_2'
args = None
configs = None
def __init__(self, args, configs):
self.args = args
self.configs = configs
def connect(self):
conn = sqlite3.connect(self._get_db_file(), isolation_level=None)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS {0} (
id text,
heartbeat text)
'''.format(self.table_name))
return (conn, c)
def push(self, heartbeat):
if not HAS_SQL:
return
try:
conn, c = self.connect()
data = {
'id': heartbeat.get_id(),
'heartbeat': heartbeat.json(),
}
c.execute('INSERT INTO {0} VALUES (:id,:heartbeat)'.format(self.table_name), data)
conn.commit()
conn.close()
except sqlite3.Error:
log.traceback()
def pop(self):
if not HAS_SQL:
return None
tries = 3
wait = 0.1
try:
conn, c = self.connect()
except sqlite3.Error:
log.traceback(logging.DEBUG)
return None
heartbeat = None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
|
if row is not None:
id = row[0]
heartbeat = Heartbeat(json.loads(row[1]), self.args, self.configs, _clone=True)
c.execute('DELETE FROM {0} WHERE id=?'.format(self.table_name), [id])
conn.commit()
|
loop = False
except sqlite3.Error:
log.traceback(logging.DEBUG)
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error:
log.traceback(logging.DEBUG)
return heartbeat
def push_many(self, heartbeats):
for heartbeat in heartbeats:
self.push(heartbeat)
def pop_many(self, limit=None):
if limit is None:
limit = DEFAULT_SYNC_OFFLINE_ACTIVITY
heartbeats = []
count = 0
while count < limit:
heartbeat = self.pop()
if not heartbeat:
break
heartbeats.append(heartbeat)
count += 1
if count % HEARTBEATS_PER_REQUEST == 0:
yield heartbeats
heartbeats = []
if heartbeats:
yield heartbeats
def _get_db_file(self):
home = '~'
if os.environ.get('WAKATIME_HOME'):
home = os.environ.get('WAKATIME_HOME')
return os.path.join(os.path.expanduser(home), '.wakatime.db')
|
bobbyxuy/flask_web
|
app/auth/views.py
|
Python
|
mit
| 1,344
| 0.001488
|
from flask import render_template, redirect, request, url_for, flash
from . import auth
from ..models import User
from .forms import LoginForm, RegistrationForm
from flask_login import login_user, logout_user, login_required, current_user
from .. import db
from ..email import send_email
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
|
login_user(user, form.remember_me.data)
r
|
eturn redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('you have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
db.session.add(user)
flash('You can now login.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
|
StrasWeb/picomon
|
picomon/__init__.py
|
Python
|
gpl-3.0
| 2,005
| 0
|
import socket
from .attrtree import AttrTree
from .checks import Checks
config = AttrTree()
# the list of checks
config.install_attr('checks', Checks())
# This is the base granularity (in seconds) for polling
# Each check may then individually be configured to run every N * tick
config.install_attr('base_tick', 60)
# Default "every" check parameter, can be overridden on a per-check basis
config.install_attr('default_every', 1)
# Default "error_every" (how often we retry checks that are in error) parameter
# -1 disables feature (same as regular "every"), can be also be overridden
config.install_attr('default_error_every', -1)
# Verbosity level (one of CRITICAL, ERROR, WARNING, INFO, DEBUG)
config.install_attr('verb_level', 'INFO')
# Email addresses to send to when an alert is triggered
config.install_attr('emails.to', [])
# The From: address
config.install_attr('emails.addr_from',
'Picomon <picomon@%s>' % socket.getfqdn())
# The SMTP host, with optional :port suffix
config.install_attr('emails.smtp_host', 'localhost:25')
# The inactive timeout after which to close the SMTP connection
config.install_attr('emails.smtp_keepalive_timeout', 60)
# Timeout after which to retry sending emails after a failure
config.install_attr('emails.smtp_retry_timeout', 60)
# Interval in seconds between global reports when some checks are in error
# 0 disables reports
config
|
.install_attr('emails.report.every', 0)
# Subject template for state change email notifications
# available substitutions:
# - state ("Pro
|
blem" or "OK")
# - check (check's name, like "CheckDNSRec6")
# - dest (the target of the check ie. an IP or a Host's 'name'
# parameter)
config.install_attr('emails.subject_tpl',
'[DOMAIN] {state}: {check} on {dest}')
# reports email subject
config.install_attr('emails.report.subject', '[DOMAIN] Picomon error report')
# watchdog error email subject
config.install_attr('emails.watchdog_subject', '[DOMAIN] Picomon stopped')
|
dani882/edX---Computer-Science
|
python/lecture 3.2.py
|
Python
|
gpl-2.0
| 336
| 0.002976
|
__author__ = 'dani882'
# lecture 3.2, slide
|
6
# Find the cube root of a perfect cube
x = int(raw_input('Enter an integer: '))
ans = 0
while ans**3 < abs(x):
ans = ans + 1
if ans**3 != abs(x):
print(str(x) + ' is not a perfect cube')
else:
if x < 0:
ans = -ans
print('C
|
ube root of ' + str(x) + ' is ' + str(ans))
|
atmb4u/marlin
|
marlin/__init__.py
|
Python
|
bsd-3-clause
| 13
| 0.076923
|
im
|
port marlin
| |
mullikine/ranger
|
ranger/container/settings.py
|
Python
|
gpl-3.0
| 8,531
| 0.001524
|
# Copyright (C) 2009-2013 Roman Zimbelmann <hut@lepus.uberspace.de>
# This software is distributed under the terms of the GNU GPL version 3.
from inspect import isfunction
from ranger.ext.signals import SignalDispatcher, Signal
from ranger.core.shared import FileManagerAware
from ranger.gui.colorscheme import _colorscheme_name_to_class
import re
import os.path
ALLOWED_SETTINGS = {
'automatically_count_files': bool,
'autosave_bookmarks': bool,
'autoupdate_cumulative_size': bool,
'cd_bookmarks': bool,
'collapse_preview': bool,
'colorscheme': str,
'column_ratios': (tuple, list),
'confirm_on_delete': str,
'dirname_in_tabs': bool,
'display_size_in_main_column': bool,
'display_size_in_status_bar': bool,
'display_tags_in_all_columns': bool,
'draw_borders': bool,
'draw_progress_bar_in_status_bar': bool,
'flushinput': bool,
'hidden_filter': str,
'idle_delay': int,
'max_console_history_size': (int, type(None)),
'max_history_size': (int, type(None)),
'mouse_enabled': bool,
'open_all_images': bool,
'padding_right': bool,
'preview_directories': bool,
'preview_files': bool,
'preview_images': bool,
'preview_max_size': int,
'preview_script': (str, type(None)),
'save_console_history': bool,
'scroll_offset': int,
'shorten_title': int,
'show_cursor': bool, # TODO: not working?
'show_selection_in_titlebar': bool,
'show_hidden_bookmarks': bool,
'show_hidden': bool,
'sort_case_insensitive': bool,
'sort_directories_first': bool,
'sort_reverse': bool,
'sort': str,
'status_bar_on_top': bool,
'tilde_in_titlebar': bool,
'unicode_ellipsis': bool,
'update_title': bool,
'update_tmux_title': bool,
'use_preview_script': bool,
'vcs_aware': bool,
'vcs_backend_bzr': str,
'vcs_backend_git': str,
'vcs_backend_hg': str,
'xterm_alt_key': bool,
}
DEFAULT_VALUES = {
bool: False,
type(None): None,
str: "",
int: 0,
list: [],
tuple: tuple([]),
}
class Settings(SignalDispatcher, FileManagerAware):
def __init__(self):
SignalDispatcher.__init__(self)
self.__dict__['_localsettings'] = dict()
self.__dict__['_localregexes'] = dict()
self.__dict__['_tagsettings'] = dict()
self.__dict__['_settings'] = dict()
for name in ALLOWED_SETTINGS:
self.signal_bind('setopt.'+name,
self._sanitize, priority=1.0)
self.signal_bind('setopt.'+name,
self._raw_set_with_signal, priority=0.2)
def _sanitize(self, signal):
name, value = signal.setting, signal.value
if name == 'column_ratios':
# TODO: cover more cases here
if isinstance(value, tuple):
signal.value = list(value)
if not isinstance(value, list) or len(value) < 2:
signal.value = [1, 1]
else:
signal.value = [int(i) if str(i).isdigit() else 1 \
for i in value]
elif name == 'colorscheme':
_colorscheme_name_to_class(signal)
elif name == 'preview_script':
if isinstance(value, str):
result = os.path.expanduser(value)
if os.path.exists(result):
signal.value = result
else:
signal.value = None
elif name == 'use_preview_script':
if self._settings['preview_script'] is None and value \
and self.fm.ui.is_on:
self.fm.notify("Preview script undefined or not found!",
bad=True)
def set(self, name, value, path=None, tags=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if name not in self._settings:
previous = None
else:
previous=self._settings[name]
assert self._check_type(name, value)
assert not (tags and path), "Can't set a setting for path and tag " \
"at the same time!"
kws = dict(setting=name, value=value, previous=previous,
path=path, tags=tags, fm=self.fm)
self.signal_emit('setopt', **kws)
self.signal_emit('setopt.'+name, **kws)
def get(self, name, path=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if path:
localpath = path
else:
try:
localpath = self.fm.thisdir.path
except:
localpath = path
if localpath:
for pattern, regex in self._localregexes.items():
if name in self._localsettings[pattern] and\
regex.search(localpath):
return self._localsettings[pattern][name]
if self._tagsettings and path:
realpath = os.path.realpath(path)
if realpath in self.fm.tags:
tag = self.fm.tags.marker(realpath)
if tag in self._tagsettings and name in self._tagsettings[tag]:
return self._tagsettings[tag][name]
if name in self._settings:
return self._settings[name]
else:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._raw_set(name, value)
self.__setattr__(name, value)
return self._settings[name]
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self.set(name, value, None)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self.get(name, None)
def __iter__(self):
for x in self._settings:
yield x
def types_of(self, name):
try:
typ = ALLOWED_SETTINGS[name]
except KeyError:
return tuple()
else:
if isinstance(typ, tuple):
return typ
else:
return (typ, )
def _check_type(self, name, value):
typ = ALLOWED_SETTINGS[name]
if isfunction(typ):
assert typ(value), \
"Warning: The option `" + name + "' has an incorrect type!"
else:
assert isinstance(value, typ), \
"Warning: The option `" + name + "' has an incorrect type!"\
" Got " + str(type(value)) + ", expected " + str(typ) + "!" +\
" Please check if your commands.py is up to date." if not \
self.fm.ui.is_set_up else ""
return True
__getitem__ = __getattr__
__setitem__ = __setattr__
def _raw_set(self, name, value, path=None, tags=None):
if path:
if not path in self._localsettings:
try:
regex = re.compile(path)
except
|
:
# Bad regular expression
return
self._localregexes[path] = regex
self._localsettings[path] = dict()
self._localsettings[path][name] = value
# make sure name is in _settings, so __iter__ runs through
|
# local settings too.
if not name in self._settings:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._settings[name] = value
elif tags:
for tag in tags:
if tag not in self._tagsettings:
self._tagsettings[tag] = dict()
self._tagsettings[tag][name] = value
else:
self._settings[name] = value
def _raw_set_with_signal(self, signal):
self._raw_set(signal.setting, signal.value, signal.path, signal.tags)
class LocalSettings():
def __init__(self, path, parent):
self.__dict__['_parent'] = parent
self.__dict__['_path'] = path
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self._parent
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/services/librarianserver/tests/test_db_outage.py
|
Python
|
agpl-3.0
| 3,628
| 0
|
# Copyright 2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test behavior of the Librarian during a database outage.
Database outages happen by accident and during fastdowntime deployments."""
__metaclass__ = type
from cStringIO import StringIO
import urllib2
from fixtures import Fixture
from lp.services.librarian.client import LibrarianClient
from lp.services.librarianserver.testing.server import LibrarianServerFixture
from lp.testing import TestCase
from lp.testing.fixture import PGBouncerFixture
from lp.testing.layers import (
BaseLayer,
DatabaseFunctionalLayer,
)
class PGBouncerLibrarianLayer(DatabaseFunctionalLayer):
"""Custom layer for TestLibrarianDBOutage.
We are using a custom layer instead of standard setUp/tearDown to
avoid the lengthy Librarian startup time, and to cope with undoing
changes made to BaseLayer.config_fixture to allow access to the
Librarian we just started up.
"""
pgbouncer_fixture = None
librarian_fixture = None
@classmethod
def setUp(cls):
# Fixture to hold other fixtures.
cls._fixture = Fixture()
cls._fixture.setUp()
cls.pgbouncer_fixture = PGBouncerFixture()
# Install the PGBouncer fixture so we shut it down to
# create database outages.
cls._fixture.useFixture(cls.pgbouncer_fixture)
# Bring up the Librarian, which will be connecting via
# pgbouncer.
cls.librarian_fixture = LibrarianServerFixture(
BaseLayer.config_fixture)
cls._fixture.useFixture(cls.librarian_fixture)
@classmethod
def tearDown(cls):
cls.pgbouncer_fixture = None
cls.librarian_fixture = None
cls._fixture.cleanUp()
@classmethod
def testSetUp(cls):
cls.pgbouncer_fixture.start()
class TestLibrarianDBOutage(TestCase):
layer = PGBouncerLibrarianLayer
def setUp(self):
super(TestLibrarianDBOutage, self).setUp()
self.pgbouncer = PGBouncerLibrarianLayer.pgbouncer_fixture
self.client = LibrarianClient()
# Add a file to the Librarian so we can download it.
self.url = self._makeLibraryFileUrl()
def _makeLibraryFileUrl(self):
data = 'whatever'
return self.client.remoteAddFile(
'foo.txt', len(data), StringIO(data), 'text/plain')
def getErrorCode(self):
# We need to talk to every Librarian thread to ensure all the
# Librarian database connections are in a known state.
# XXX StuartBishop 2011-09-01 bug=840046: 20 might be overkill
# for the test run, but we have no real way of knowing how many
# connections are in use.
num_librarian_threads = 20
codes = set()
for count in range(num_librarian_threads):
try:
urllib2.urlopen(self.url).read()
codes.add(200)
except urllib2.HTTPError as error:
codes.add(error.c
|
ode)
self.assertTrue(len(codes) == 1, 'Mixed responses: %s' % str(codes))
return codes.pop()
def test_outage(self):
# Everything should be working fine to start with.
self.assertEqual(self.getErrorCode(), 200)
# When the outage kicks in, we start getting 503 responses
# instead of 200 and 404s.
self.pgbouncer.stop()
self.assertEqual(self.getError
|
Code(), 503)
# When the outage is over, things are back to normal.
self.pgbouncer.start()
self.assertEqual(self.getErrorCode(), 200)
|
geonetix/simplemq
|
simplequeue/__init__.py
|
Python
|
mit
| 71
| 0
|
from simplequeue.lib.configuration import config
__all__ = ['conf
|
ig']
| |
iw3hxn/LibrERP
|
sale_order_version/models/__init__.py
|
Python
|
agpl-3.0
| 1,120
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2014 Didotech srl (<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it unde
|
r the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more detai
|
ls.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import sale_order
from . import sale_order_line
from . import sale_shop
from . import sale_monkey
|
akx/shoop
|
_misc/ensure_license_headers.py
|
Python
|
agpl-3.0
| 4,075
| 0.001227
|
#!/usr/bin/env python3
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
License header updater.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import sanity_utils
HEADER = """
This file is part of Shoop.
Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
This source code is licensed under the AGPLv3 license found in the
LICENSE file in the root directory of this source tree.
""".strip()
PY_HEADER = '\n'.join(('# ' + line).strip() for line in HEADER.splitlines())
JS_HEADER = (
'/**\n' +
'\n'.join((' * ' + line).rstrip() for line in HEADER.splitlines()) +
'\n */')
PY_HEADER_LINES = PY_HEADER.encode('utf-8').splitlines()
JS_HEADER_LINES = JS_HEADER.encode('utf-8').splitlines()
def get_adders():
return {
'.py': add_header_to_python_file,
'.js': add_header_to_javascript_file
}
def main():
ap = argparse.ArgumentParser()
ap.add_argument("root", nargs="+", help="Directory roots to recurse through")
ap.add_argument("-w", "--write", help="Ac
|
tually write changes", action="store_true")
ap.add_argument("-s", "--exit-status", help="Exit with error status when missing headers", action="store_true")
ap.add_argument("-v", "--verbose", help="Log OK files too", action="store_true")
args = ap.parse_args()
adders = get_adders()
paths = find_files(roots=args.root, extensions=set(adders.keys()))
missing = process_files(paths, adders, verbose=args.verbose
|
, write=args.write)
if args.exit_status and missing:
return 1
return 0
def process_files(paths, adders, verbose, write):
width = max(len(s) for s in paths)
missing = set()
for path in sorted(paths):
if os.stat(path).st_size == 0:
if verbose:
print('[+]:%-*s: File is empty' % (width, path))
elif not has_header(path):
missing.add(path)
if write:
adder = adders[os.path.splitext(path)[1]]
adder(path)
print('[!]:%-*s: Modified' % (width, path))
else:
print('[!]:%-*s: Requires license header' % (width, path))
else:
if verbose:
print('[+]:%-*s: File has license header' % (width, path))
return missing
def find_files(roots, extensions):
paths = set()
generated_resources = set()
for root in roots:
for file in sanity_utils.find_files(
root,
generated_resources=generated_resources,
allowed_extensions=extensions,
ignored_dirs=sanity_utils.IGNORED_DIRS + ["migrations"]
):
if not is_file_ignored(file):
paths.add(file)
paths -= generated_resources
return paths
def is_file_ignored(filepath):
filepath = filepath.replace(os.sep, "/")
return (
('vendor' in filepath) or
('doc/_ext/djangodocs.py' in filepath)
)
def has_header(path):
with open(path, 'rb') as fp:
return b"This file is part of Shoop." in fp.read(256)
def add_header_to_python_file(path):
lines = get_lines(path)
if lines:
i = 0
if lines[i].startswith(b'#!'):
i += 1
if i < len(lines) and b'coding' in lines[i]:
i += 1
new_lines = lines[:i] + PY_HEADER_LINES + lines[i:]
write_lines(path, new_lines)
def add_header_to_javascript_file(path):
lines = get_lines(path)
if lines:
new_lines = JS_HEADER_LINES + lines
write_lines(path, new_lines)
def get_lines(path):
with open(path, 'rb') as fp:
contents = fp.read()
if not contents.strip():
return []
return contents.splitlines()
def write_lines(path, new_lines):
with open(path, 'wb') as fp:
for line in new_lines:
fp.write(line + b'\n')
if __name__ == '__main__':
sys.exit(main())
|
applecool/Practice
|
Python/Sorting/RadixSort.py
|
Python
|
mit
| 556
| 0.082734
|
#implementation of radix sort in Python.
def RadixSort(A):
RADIX = 10
maxLength = False
tmp , placement = -1, 1
while not maxLength:
maxLength = True
buckets = [list() for _ in range(RADIX)]
for i in A:
tmp = i / placement
buckets[tmp % RADIX].append(i)
if maxLength and tmp > 0:
maxLength = False
a = 0
for b in range(RADI
|
X):
buck = buckets[b]
for i i
|
n buck:
A[a] = i
a += 1
# move to next digit
placement *= RADIX
A = [534, 246, 933, 127, 277, 321, 454, 565, 220]
print(RadixSort(A))
|
msultan/mdtraj
|
mdtraj/nmr/scalar_couplings.py
|
Python
|
lgpl-2.1
| 8,005
| 0.001376
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, TJ Lane, Osama El-Gabalawy
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
"""
This file contains scripts for calculating scalar (J) Couplings from backbone dihedrals.
"""
##############################################################################
# Imports
##############################################################################
import numpy as np
from mdtraj.geometry import compute_phi
##############################################################################
# Globals
##############################################################################
J3_HN_CB_coefficients = { # See full citations below in docstring references.
"Bax2007": dict(phi0=+60 * np.pi/180., A=3.71, B=-0.59, C=0.08), # From Table 1. in paper
}
J3_HN_CB_uncertainties = {
# Values in [Hz]
"Bax2007": 0.22,
}
J3_HN_C_coefficients = { # See full citations below in docstring references.
"Bax2007": dict(phi0=+180 * np.pi/180., A=4.36, B=-1.08, C=-0.01), # From Table 1. in paper
}
J3_HN_C_uncertainties = {
# Values in [Hz]
"Bax2007": 0.30,
}
J3_HN_HA_coefficients = { # See full citations below in docstring references.
"Ruterjans1999": dict(phi0=-60 * np.pi/180., A=7.90, B=-1.05, C=0.65), # From Table 1. in paper.
"Bax2007": dict(phi0=-60 * np.pi/180., A=8.4, B=-1.36, C=0.33), # From Table 1. in paper
"Bax1997": dict(phi0=-60 * np.pi/180., A=7.09, B=-1.42, C=1.55), # From Table 2. in paper
}
J3_HN_HA_uncertainties = {
# Values in [Hz]
"Ruterjans1999": 0.25,
"Bax2007": 0.36,
"Bax1997": 0.39
}
##############################################################################
# Functions
##############################################################################
def _J3_function(phi, A, B, C, phi0):
"""Return a scalar couplings with a given choice of karplus coeffici
|
ents. USES RADIANS!"""
return A * np.cos(phi + phi0) ** 2. + B * np.cos(phi + phi0) + C
def compute_J3_HN_HA(traj, model="Bax2007"):
"""Calculate the scalar coupling between HN and H_alpha.
This function does not take into account periodic boundary conditions (it
will give spurious results if the three atoms which make up any angle jump
across a PBC (are not "wholed"))
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to compute J3_HN_H
|
A for
model : string, optional, default="Bax2007"
Which scalar coupling model to use. Must be one of Bax2007, Bax1999,
or Ruterjans1999
Returns
-------
indices : np.ndarray, shape=(n_phi, 4), dtype=int
Atom indices (zero-based) of the phi dihedrals
J : np.ndarray, shape=(n_frames, n_phi)
Scalar couplings (J3_HN_HA, in [Hz]) of this trajectory.
`J[k]` corresponds to the phi dihedral associated with
atoms `indices[k]`
Notes
-----
The coefficients are taken from the references below--please cite them.
References
----------
.. [1] Schmidt, J. M., Blümel, M., Löhr, F., & Rüterjans, H.
"Self-consistent 3J coupling analysis for the joint calibration
of Karplus coefficients and evaluation of torsion angles."
J. Biomol. NMR, 14, 1 1-12 (1999)
.. [2] Vögeli, B., Ying, J., Grishaev, A., & Bax, A.
"Limits on variations in protein backbone dynamics from precise
measurements of scalar couplings."
J. Am. Chem. Soc., 129(30), 9377-9385 (2007)
.. [3] Hu, J. S., & Bax, A.
"Determination of ϕ and ξ1 Angles in Proteins from 13C-13C
Three-Bond J Couplings Measured by Three-Dimensional Heteronuclear NMR.
How Planar Is the Peptide Bond?."
J. Am. Chem. Soc., 119(27), 6360-6368 (1997)
"""
indices, phi = compute_phi(traj)
if model not in J3_HN_HA_coefficients:
raise(KeyError("model must be one of %s" % J3_HN_HA_coefficients.keys()))
J = _J3_function(phi, **J3_HN_HA_coefficients[model])
return indices, J
def compute_J3_HN_C(traj, model="Bax2007"):
"""Calculate the scalar coupling between HN and C_prime.
This function does not take into account periodic boundary conditions (it
will give spurious results if the three atoms which make up any angle jump
across a PBC (are not "wholed"))
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to compute J3_HN_C for
model : string, optional, default="Bax2007"
Which scalar coupling model to use. Must be one of Bax2007
Returns
-------
indices : np.ndarray, shape=(n_phi, 4), dtype=int
Atom indices (zero-based) of the phi dihedrals
J : np.ndarray, shape=(n_frames, n_phi)
Scalar couplings (J3_HN_C, in [Hz]) of this trajectory.
`J[k]` corresponds to the phi dihedral associated with
atoms `indices[k]`
Notes
-----
The coefficients are taken from the references below--please cite them.
References
----------
.. [1] Hu, J. S., & Bax, A.
"Determination of ϕ and ξ1 Angles in Proteins from 13C-13C
Three-Bond J Couplings Measured by Three-Dimensional Heteronuclear NMR.
How Planar Is the Peptide Bond?."
J. Am. Chem. Soc., 119(27), 6360-6368 (1997)
"""
indices, phi = compute_phi(traj)
if model not in J3_HN_C_coefficients:
raise(KeyError("model must be one of %s" % J3_HN_C_coefficients.keys()))
J = _J3_function(phi, **J3_HN_C_coefficients[model])
return indices, J
def compute_J3_HN_CB(traj, model="Bax2007"):
"""Calculate the scalar coupling between HN and C_beta.
This function does not take into account periodic boundary conditions (it
will give spurious results if the three atoms which make up any angle jump
across a PBC (are not "wholed"))
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to compute J3_HN_CB for
model : string, optional, default="Bax2007"
Which scalar coupling model to use. Must be one of Bax2007
Returns
-------
indices : np.ndarray, shape=(n_phi, 4), dtype=int
Atom indices (zero-based) of the phi dihedrals
J : np.ndarray, shape=(n_frames, n_phi)
Scalar couplings (J3_HN_CB, in [Hz]) of this trajectory.
`J[k]` corresponds to the phi dihedral associated with
atoms `indices[k]`
Notes
-----
The coefficients are taken from the references below--please cite them.
References
----------
.. [1] Hu, J. S., & Bax, A.
"Determination of ϕ and ξ1 Angles in Proteins from 13C-13C
Three-Bond J Couplings Measured by Three-Dimensional Heteronuclear NMR.
How Planar Is the Peptide Bond?."
J. Am. Chem. Soc., 119(27), 6360-6368 (1997)
"""
indices, phi = compute_phi(traj)
if model not in J3_HN_CB_coefficients:
raise(KeyError("model must be one of %s" % J3_HN_CB_coefficients.keys()))
J = _J3_function(phi, **J3_HN_CB_coefficients[model])
return indices, J
|
HalCanary/skia-hc
|
tools/skp/page_sets/skia_cnn_desktop.py
|
Python
|
bsd-3-clause
| 1,145
| 0.00524
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_cnn_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url)
action_runner.Wait(15)
class SkiaCnnDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaCnnDesktopPageSet, self).__init__(
archive_data_file='data/skia_cnn_d
|
esktop.json')
urls_list = [
# go/skia-skps-3-2019
'http://www.cnn.com',
]
for url in urls_list:
self.AddStory(SkiaDesktopPage(url,
|
self))
|
nghiattran/generator-python-parse
|
generators/endpoint/templates/model_template.py
|
Python
|
mit
| 201
| 0.014925
|
#
|
@name <%= app_name %>
# @description
# Models for UserControler.
import json
from src.models import BaseModel
class <%= endpoint %>Model(BaseModel):
_parse_class_n
|
ame = '<%= table %>'
pass
|
KnockSoftware/whitenoise
|
whitenoise/__init__.py
|
Python
|
mit
| 118
| 0
|
from __future__ import absolute_import
from .base import WhiteNoise
__ve
|
rsion__ = '2.0.3'
__all
|
__ = ['WhiteNoise']
|
myersjustinc/django-calaccess-campaign-browser
|
calaccess_campaign_browser/api.py
|
Python
|
mit
| 545
| 0
|
from tastypie.resources import ModelResource, ALL
from .models impor
|
t Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filer_id_raw': ALL}
excludes = ['id']
class FilingResource(ModelResource):
class Meta:
|
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filing_id_raw': ALL}
excludes = ['id']
|
jamesiter/jimauth
|
views/user_mgmt.py
|
Python
|
gpl-3.0
| 14,179
| 0.001987
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from flask import Blueprint, request
import jimit as ji
from models import UidOpenidMapping
from models import Utils, Rules, User
__author__ = 'James Iter'
__date__ = '16/6/8'
__contact__ = 'james.iter.cn@gmail.com'
__copyright__ = '(c) 2016 by James Iter.'
blueprint = Blueprint(
'user_mgmt',
__name__,
url_prefix='/api/user_mgmt'
)
blueprints = Blueprint(
'users_mgmt',
__name__,
url_prefix='/api/users_mgmt'
)
@Utils.dumps2response
@Utils.superuser
def r_get(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
user.get()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = user.__dict__
del ret['data']['password']
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_get_by_login_name(login_name=None):
user = User()
args_rules = [
Rules.LOGIN_NAME.value
]
user.login_name = login_name
try:
ji.Check.previewing(args_rules, user.__dict__)
user.get_by('login_name')
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = user.__dict__
del ret['data']['password']
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_enable(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
if user.id == 1:
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
user.get()
except ji.PreviewingError, e:
return json.loads(e.message)
args_rules = [
Rules.ENABLED.value
]
user.enabled = True
try:
ji.Check.previewing(args_rules, user.__dict__)
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_disable(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(user.id)
if user.id == 1:
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
user.get()
except ji.PreviewingError, e:
return json.loads(e.message)
args_rules = [
Rules.ENABLED.value
]
user.enabled = False
try:
ji.Check.previewing(args_rules, user.__dict__)
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_delete(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.id = long(use
|
r.id)
if user.id == 1:
ret = dict()
ret['state'] = ji.Common.exchange_state(40301)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
user.delete
|
()
# 删除依赖于该用户的openid
UidOpenidMapping.delete_by_filter('uid:in:' + _id)
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_update(_id):
user = User()
args_rules = [
Rules.UID.value
]
if 'login_name' in request.json:
args_rules.append(
Rules.LOGIN_NAME.value
)
if 'mobile_phone' in request.json:
args_rules.append(
Rules.MOBILE_PHONE.value
)
if 'mobile_phone_verified' in request.json:
args_rules.append(
Rules.MOBILE_PHONE_VERIFIED.value
)
if 'email' in request.json:
args_rules.append(
Rules.EMAIL.value
)
if 'email_verified' in request.json:
args_rules.append(
Rules.EMAIL_VERIFIED.value
)
if 'role_id' in request.json:
request.json['role_id'] = request.json['role_id'].__str__()
args_rules.append(
Rules.ROLE_ID_EXT.value
)
if args_rules.__len__() < 2:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
request.json['id'] = _id
try:
ji.Check.previewing(args_rules, request.json)
user.id = int(request.json.get('id'))
user.get()
user.login_name = request.json.get('login_name', user.login_name)
user.mobile_phone = request.json.get('mobile_phone', user.mobile_phone)
user.mobile_phone_verified = request.json.get('mobile_phone_verified', user.mobile_phone_verified)
user.email = request.json.get('email', user.email)
user.email_verified = request.json.get('email_verified', user.email_verified)
user.role_id = int(request.json.get('role_id', user.role_id))
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_change_password(_id):
user = User()
args_rules = [
Rules.UID.value
]
user.id = _id
try:
ji.Check.previewing(args_rules, user.__dict__)
user.get()
except ji.PreviewingError, e:
return json.loads(e.message)
args_rules = [
Rules.PASSWORD.value
]
user.password = request.json.get('password')
try:
ji.Check.previewing(args_rules, user.__dict__)
user.password = ji.Security.ji_pbkdf2(user.password)
user.update()
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
@Utils.superuser
def r_get_by_filter():
page = str(request.args.get('page', 1))
page_size = str(request.args.get('page_size', 50))
args_rules = [
Rules.PAGE.value,
Rules.PAGE_SIZE.value
]
try:
ji.Check.previewing(args_rules, {'page': page, 'page_size': page_size})
except ji.PreviewingError, e:
return json.loads(e.message)
page = int(page)
page_size = int(page_size)
# 把page和page_size换算成offset和limit
offset = (page - 1) * page_size
# offset, limit将覆盖page及page_size的影响
offset = str(request.args.get('offset', offset))
limit = str(request.args.get('limit', page_size))
order_by = request.args.get('order_by', 'id')
order = request.args.get('order', 'asc')
filter_str = request.args.get('filter', '')
args_rules = [
Rules.OFFSET.value,
Rules.LIMIT.value,
Rules.ORDER_BY.value,
Rules.ORDER.value
]
try:
ji.Check.previewing(args_rules, {'offset': offset, 'limit': limit, 'order_by': order_by, 'order': order})
offset = int(offset)
limit = int(limit)
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
ret['paging'] = {'total': 0, 'offset': offset, 'limit': limit, 'page': page, 'page_size': page_size,
'next': '', 'prev': '', 'first': '', 'last': ''}
ret['data'], ret['paging']['total'] = User.get_by_filter(offset=offset, limit=limit, order_by=order_by,
order=order, filter_str=filter_str)
host_url = request.host_url.rstrip('/')
other_str = '&filter=' + filter_str + '&order=' + order + '&order_by=' + order_by
last_pagination = (ret['paging']['total'] + page_size - 1) / page_size
if page <= 1:
ret['paging']['prev'] = host_url + blueprints.url_prefix + '?page=1&page_size=' + page_size.__str__() + \
other_str
else:
ret['paging']['prev'] = host_url + blueprints.url_prefix + '?page=' + str(page-1) + '&page_si
|
rafaelribeiroo/ensinoDistancia
|
src/apps/courses/migrations/0002_auto_20171103_0057.py
|
Python
|
gpl-3.0
| 685
| 0.00292
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-03 00:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='course',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='courses/images', verbose_name='Imagem'),
),
migrations.AlterField(
model_name='course',
name
|
='name',
|
field=models.CharField(max_length=100, unique=True, verbose_name='Nome'),
),
]
|
nafitzgerald/allennlp
|
setup.py
|
Python
|
apache-2.0
| 3,457
| 0.002025
|
"""
In order to create a package for pypi, you need to follow several steps.
1. Create a .pypirc in your home directory. It should look like this:
```
[distutils]
index-servers =
pypi
pypitest
[pypi]
username=allennlp
password= Get the password from LastPass.
[pypitest]
repository=https://test.pypi.org/legacy/
username=allennlp
password= Get the password from LastPass.
```
run chmod 600 ./pypirc so only you can read/write.
1. Change the version in docs/conf.py and setup.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory.
(this will build a wheel for the python version you use to build it - make sure you use python 3.x).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi allennlp
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from setuptools import setup, find_packages
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
VERSION = '0.3.1-unreleased'
setup(name='allennlp',
version=VERSION,
description='An open-source NLP research library, built on PyTorch.',
classifiers=[
'Intended Audience :: Science/Research',
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='allennlp NLP deep learning machine reading',
url='https://github.com/allenai/allennlp',
author='Allen Institute for Artificial Intelligence',
author_email='allennlp@allenai.org',
license='Apache',
packages=find_packages(),
install_requires=[
'pyhocon==0.3.35',
'typing',
'overrides',
'nltk',
'spacy>=2.0,<2.1',
|
'numpy',
'tensorboard',
'cffi==1.11.2',
'awscli>=1.11.91',
'flask==0.12.1',
'flask-cors==3.0.3',
'psycopg2',
'argparse',
'requests>=2.18',
'tqdm',
'editdistance',
'jupyter',
'h5py',
'scikit-
|
learn',
'scipy',
'pytz==2017.3'
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
python_requires='>=3.6',
zip_safe=False)
|
fccagou/tools
|
python/sql/csv-to-sql.py
|
Python
|
gpl-2.0
| 537
| 0.013035
|
import csv, sqlite3
con = sqlite3.connect("toto.db") # change to 'sqlite:///your_filename.db'
cur = con.cursor()
cur.execute("CREATE TABLE t (col1, col2)
|
;") # use your column names here
with open('data.csv','r') as fin: # `with` statement available in 2.5+
# csv.DictReader uses first line in file for column headings by default
dr = csv.DictReader(fin) # comma is default delimiter
to_db = [(i['col1'], i['col2']) for i in dr]
cur.executemany("INSERT INTO t (col1, col2) VALUES (?, ?);
|
", to_db)
con.commit()
con.close()
|
itaiin/arrow
|
python/pyarrow/types.py
|
Python
|
apache-2.0
| 6,569
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tools for dealing with Arrow type metadata in Python
from pyarrow.lib import (is_boolean_value, # noqa
is_integer_value,
is_float_value)
import pyarrow.lib as lib
_SIGNED_INTEGER_TYPES = {lib.Type_INT8, lib.Type_INT16, lib.Type_INT32,
lib.Type_INT64}
_UNSIGNED_INTEGER_TYPES = {lib.Type_UINT8, lib.Type_UINT16, lib.Type_UINT32,
lib.Type_UINT64}
_INTEGER_TYPES = _SIGNED_INTEGER_TYPES | _UNSIGNED_INTEGER_TYPES
_FLOATING_TYPES = {lib.Type_HALF_FLOAT, lib.Type_FLOAT, lib.Type_DOUBLE}
_DATE_TYPES = {lib.Type_DATE32, lib.Type_DATE64}
_TIME_TYPES = {lib.Type_TIME32, lib.Type_TIME64}
_TEMPORAL_TYPES = {lib.Type_TIMESTAMP} | _TIME_TYPES | _DATE_TYPES
_NESTED_TYPES = {lib.Type_LIST, lib.Type_STRUCT, lib.Type_UNION, lib.Type_MAP}
def is_null(t):
"""
Return True if value is an instance of a null type
"""
return t.id == lib.Type_NA
def is_boolean(t):
"""
Return True if value is an instance of a boolean type
"""
return t.id == lib.Type_BOOL
def is_integer(t):
"""
Return True if value is an instance of any integer type
"""
return t.id in _INTEGER_TYPES
def is_signed_integer(t):
"""
Return True if value is an instance of any signed integer type
"""
return t.id in _SIGNED_INTEGER_TYPES
def is_unsigned_integer(t):
"""
Return True if value is an instance of any unsigned integer type
"""
return t.id in _UNSIGNED_INTEGER_TYPES
def is_int8(t):
"""
Return True if value is an instance of an int8 type
"""
return t.id == lib.Type_INT8
def is_int16(t):
"""
Return True if value is an instance of an int16 type
"""
return t.id == lib.Type_INT16
def is_int32(t):
"""
Return True if value is an instance of an int32 type
"""
return t.id == lib.Type_INT32
def is_int64(t):
"""
Return True if value is an instance of an int64 type
"""
return t.id == lib.Type_INT64
def is_uint8(t):
"""
Return True if value is an instance of an uint8 type
"""
return t.id == lib.Type_UINT8
def is_uint16(t):
"""
Return True if value is an instance of an uint16 type
"""
return t.id == lib.Type_UINT16
def is_uint32(t):
"""
Return True if value is an instance of an uint32 type
"""
return t.id == lib.Type_UINT32
def is_uint64(t):
"""
Return True if value is an instance of an uint64 type
"""
return t.id == lib.Type_UINT64
def is_floating(t):
"""
Return True if value is an instance of a floating point numeric type
"""
return t.id in _FLOATING_TYPES
def is_float16(t):
"""
Return True if value is an instance of an float16 (half-precision) type
"""
return t.id == lib.Type_HALF_FLOAT
def is_float32(t):
"""
Return True if value is an instance of an float32 (single precision) type
"""
return t.id == lib.Type_FLOAT
def is_float64(t):
"""
Return True if value is an instance of an float64 (double precision) type
"""
return t.id == lib.Type_DOUBLE
def is_list(t):
"""
Return True if value is an instance of a list type
"""
return t.id == lib.Type_LIST
def is_struct(t):
"""
Return True if value is an instance of a struct type
"""
return t.id == lib.Type_STRUCT
def is_union(t):
"""
Return True if value is an instance of a union type
"""
return t.id == lib.Type_UNION
def is_nested(t):
"""
Return True if value is an instance of a nested type
"""
return t.id in _NESTED_TYPES
def is_temporal(t):
"""
Return True if value is an instance of a temporal (date, time, timestamp)
type
"""
return t.id in _TEMPORAL_TYPES
def is_timestamp(t):
"""
Return True if value is an instance of a timestamp type
"""
return t.id == lib.Type_TIMESTAMP
def is_time(t):
"""
Return True if value is an instance of a time type
"""
return t.id in _TIME_TYPES
def is_time32(t):
"
|
""
Return True if value is an instance of a time32 type
"""
return t.id == lib.Type_TIME32
def is_time64(t):
"""
Return True if value is an instance of a time64 type
"""
return t.id == lib.Type_TIME64
def is_binary(t):
"""
Return True if value is an instance of a variable-length binary type
"""
return t.id == lib.Type_BINARY
def is_unicode(t):
"""
Alias for is_stri
|
ng
"""
return is_string(t)
def is_string(t):
"""
Return True if value is an instance of string (utf8 unicode) type
"""
return t.id == lib.Type_STRING
def is_fixed_size_binary(t):
"""
Return True if value is an instance of a fixed size binary type
"""
return t.id == lib.Type_FIXED_SIZE_BINARY
def is_date(t):
"""
Return True if value is an instance of a date type
"""
return t.id in _DATE_TYPES
def is_date32(t):
"""
Return True if value is an instance of a date32 (days) type
"""
return t.id == lib.Type_DATE32
def is_date64(t):
"""
Return True if value is an instance of a date64 (milliseconds) type
"""
return t.id == lib.Type_DATE64
def is_map(t):
"""
Return True if value is an instance of a map logical type
"""
return t.id == lib.Type_MAP
def is_decimal(t):
"""
Return True if value is an instance of a decimal type
"""
return t.id == lib.Type_DECIMAL
def is_dictionary(t):
"""
Return True if value is an instance of a dictionary-encoded type
"""
return t.id == lib.Type_DICTIONARY
def is_primitive(t):
"""
Return True if the value is an instance of a primitive type
"""
return lib._is_primitive(t.id)
|
jawilson/home-assistant
|
homeassistant/components/energy/__init__.py
|
Python
|
apache-2.0
| 1,029
| 0.000972
|
"""The Energy integration."""
from __future__ import annotations
from homeassistant.components import frontend
from homeassistant.core import HomeAssistant
from homeassistant.helpers import discovery
from homeassistant.helpers.typing import ConfigType
from . import websocket_api
from .const import DOMAIN
from .data import async_get_manager
async def is_configured(hass: HomeAssistant) -> bool:
"""Return a boolean to indicate if energy is configured."""
manager = await async_g
|
et_manager(hass)
if manager.data is None:
return False
return bool(manager.data != manager.default_preferences())
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Energy."""
websocket_api.async_setup(ha
|
ss)
frontend.async_register_built_in_panel(hass, DOMAIN, DOMAIN, "mdi:lightning-bolt")
hass.async_create_task(
discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
)
hass.data[DOMAIN] = {
"cost_sensors": {},
}
return True
|
houssemFat/MeeM-Dev
|
teacher/apps/dashboard/views.py
|
Python
|
mit
| 3,848
| 0.013514
|
from django.forms.models import model_to_dict
from django.db.models import Count
from django.core.paginator import Paginator
import json
from core.apps.tools.common import render_json, dump_and_render_json,\
MeeMJSONEncoder
from core.apps.accounts.models import User
from core.apps.history.models import UserLogEntry
from teacher.apps.collaboration.models import Collaborator
from teacher.apps.collaboration.models import CollaboratorInvitation
from teacher.apps.collaboration.teams.models import Team, TeamMemberShip
from teacher.common import get_file_media_url
# User.objects.annotate(page_count=Count('page')).filter(page_count__gte=2).count()
def default(request):
if request.user.is_authenticated ():
userid = request.user.id
user = User.objects.select_related().get(pk=userid)
model = model_to_dict(user, ['username', 'email'])
invitations_count = CollaboratorInvitation.objects.filter(fromuser=user.id).count()
invitations_recieved = CollaboratorInvitation.objects.filter(usermail=user.email).count()
studentCount = 0
course_count = 0
courses = user.courses.all ()
for course in courses :
studentCount = studentCount + course.students.count ()
course_count = course_count + 1
staff_count = Team.objects.annotate(staff_count=Count('members')).filter(owner=user).values ('staff_count')
staff_count = staff_count[0]['staff_count']
"""
collaborations = user.my_collaborators.select_related().all()
other_collaborations = user.my_collaborators_with_others.select_related().all()
"""
collaborators = Collaborator.objects.filter(source=user).all ()
member_in_teams = TeamMemberShip.objects.filter(member__in=collaborators).select_related('team', 'assigned_tasks').all ()
tasks_count = 0
todos = []
# FIXME
for item in member_in_teams :
tasks_count += item.team.assigned_tasks.count()
for task in item.team.tasks.all() :
task_ = model_to_dict(task, ['id', 'start', 'end', 'title'])
if getattr(task, 'label', False):
task_.update({ 'color' : task.label.color})
else :
task_.update({ 'color' : '#ccc'})
todos.append(task_)
model.update({
'id' : user.id ,
'username' : user.email,
'img_not_found' : '/images/team/houssem.jpg',
'thamb_img_url' : get_file_media_url (user.profile.cover, 'location'),
'studentsCount' : studentCount,
'coursesCount' : course_count ,
'collaboratorsCount' : staff_count,
'tasksCount' : tasks_count,
'invitations_sent_count' : invitations_count,
'invitations_recieved_count' : invitations_recieved,
'progress' : get_profile_progress(user),
});
recents = user.history.all()
paginator = Paginator(recents, 10)
recents_activities = paginator.page(1)
recents_activities_list = []
for item in recents_activities :
item_ = model_to_dict(item, fields=['id', 'action_time', 'object_id'])
item_.update({'model' : item.content_type.model})
recents_activities_list.append(item_) #.push(item_)
model.update({'history' : recents_activities_list})
|
model.update({'todos' : todos})
return render_json(requ
|
est, json.dumps(model, encoding="utf-8", cls=MeeMJSONEncoder))
else :
return dump_and_render_json(request, None)
def get_profile_progress(user) :
# privacy
# lang
# web sites
# emails
# location
return 15
|
0xkasun/zaproxy
|
python/api/src/zapv2/core.py
|
Python
|
apache-2.0
| 14,644
| 0.010311
|
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2015 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
class core(object):
def __init__(self, zap):
self.zap = zap
def alert(self, id):
"""
Gets the alert with the given ID, the corresponding HTTP message can be obtained with the 'messageId' field and 'message' API method
"""
return next(self.zap._request(self.zap.base + 'core/view/alert/', {'id' : id}).itervalues())
def alerts(self, baseurl='', start='', count=''):
"""
Gets the alerts raised by ZAP, optionally filtering by URL and paginating with 'start' position and 'count' of alerts
"""
return next(self.zap._request(self.zap.base + 'core/view/alerts/', {'baseurl' : baseurl, 'start' : start, 'count' : count}).itervalues())
def number_of_alerts(self, baseurl=''):
"""
Gets the number of alerts, optionally filtering by URL
"""
return next(self.zap._request(self.zap.base + 'core/view/numberOfAlerts/', {'baseurl' : baseurl}).itervalues())
@property
def hosts(self):
"""
Gets the name of the hosts accessed through/by ZAP
"""
return next(self.zap._request(self.zap.base + 'core/view/hosts/').itervalues())
@property
def sites(self):
"""
Gets the sites accessed through/by ZAP (scheme and domain)
"""
return next(self.zap._request(self.zap.base + 'core/view/sites/').itervalues())
@property
def urls(self):
"""
Gets the URLs accessed through/by ZAP
"""
return next(self.zap._request(self.zap.base + 'core/view/urls/').itervalues())
def message(self, id):
"""
Gets the HTTP message with the given ID. Returns the ID, request/response headers and bodies, cookies and note.
"""
return next(self.zap._request(self.zap.base + 'core/view/message/', {'id' : id}).itervalues())
def messages(self, baseurl='', start='', count=''):
"""
Gets the HTTP messages sent by ZAP, request and response, optionally filtered by URL and paginated with 'start' position and 'count' of messages
"""
return next(self.zap._request(self.zap.base + 'core/view/messages/', {'baseurl' : baseurl, 'start' : start, 'count' : count}).iterval
|
ues())
def number_of_messages(self, baseurl=''):
"""
Gets the
|
number of messages, optionally filtering by URL
"""
return next(self.zap._request(self.zap.base + 'core/view/numberOfMessages/', {'baseurl' : baseurl}).itervalues())
@property
def version(self):
"""
Gets ZAP version
"""
return next(self.zap._request(self.zap.base + 'core/view/version/').itervalues())
@property
def excluded_from_proxy(self):
"""
Gets the regular expressions, applied to URLs, to exclude from the Proxy
"""
return next(self.zap._request(self.zap.base + 'core/view/excludedFromProxy/').itervalues())
@property
def home_directory(self):
return next(self.zap._request(self.zap.base + 'core/view/homeDirectory/').itervalues())
def stats(self, keyprefix=''):
return next(self.zap._request(self.zap.base + 'core/view/stats/', {'keyPrefix' : keyprefix}).itervalues())
@property
def option_http_state_enabled(self):
return next(self.zap._request(self.zap.base + 'core/view/optionHttpStateEnabled/').itervalues())
@property
def option_use_proxy_chain(self):
return next(self.zap._request(self.zap.base + 'core/view/optionUseProxyChain/').itervalues())
@property
def option_proxy_chain_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainName/').itervalues())
@property
def option_proxy_chain_port(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPort/').itervalues())
@property
def option_proxy_chain_skip_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainSkipName/').itervalues())
@property
def option_use_proxy_chain_auth(self):
return next(self.zap._request(self.zap.base + 'core/view/optionUseProxyChainAuth/').itervalues())
@property
def option_proxy_chain_user_name(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainUserName/').itervalues())
@property
def option_proxy_chain_realm(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainRealm/').itervalues())
@property
def option_proxy_chain_password(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPassword/').itervalues())
@property
def option_proxy_chain_prompt(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyChainPrompt/').itervalues())
@property
def option_http_state(self):
return next(self.zap._request(self.zap.base + 'core/view/optionHttpState/').itervalues())
@property
def option_timeout_in_secs(self):
return next(self.zap._request(self.zap.base + 'core/view/optionTimeoutInSecs/').itervalues())
@property
def option_single_cookie_request_header(self):
return next(self.zap._request(self.zap.base + 'core/view/optionSingleCookieRequestHeader/').itervalues())
@property
def option_proxy_excluded_domains(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomains/').itervalues())
@property
def option_proxy_excluded_domains_enabled(self):
return next(self.zap._request(self.zap.base + 'core/view/optionProxyExcludedDomainsEnabled/').itervalues())
@property
def option_default_user_agent(self):
return next(self.zap._request(self.zap.base + 'core/view/optionDefaultUserAgent/').itervalues())
def shutdown(self, apikey=''):
"""
Shuts down ZAP
"""
return next(self.zap._request(self.zap.base + 'core/action/shutdown/', {'apikey' : apikey}).itervalues())
def new_session(self, name='', overwrite='', apikey=''):
"""
Creates a new session, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/newSession/', {'name' : name, 'overwrite' : overwrite, 'apikey' : apikey}).itervalues())
def load_session(self, name, apikey=''):
"""
Loads the session with the given name. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/loadSession/', {'name' : name, 'apikey' : apikey}).itervalues())
def save_session(self, name, overwrite='', apikey=''):
"""
Saves the session with the name supplied, optionally overwriting existing files. If a relative path is specified it will be resolved against the "session" directory in ZAP "home" dir.
"""
return next(self.zap._request(self.zap.base + 'core/action/saveSession/', {'name' : name, 'overwrite' : overwrite, 'apikey' : apikey}).itervalues())
def snapshot_session(self, apikey=''):
return next(self.zap._request(self.zap.base + 'core/action/snapshotSession/', {'apikey' : apikey}).itervalues())
def clear_excluded_from_p
|
Pertino/pertino-sdk-python
|
pertinosdk/tests/pertinosdk_test.py
|
Python
|
mit
| 3,217
| 0.009636
|
'''
Created on Jul 26, 2014
@author: lwoydziak
'''
from mockito.mocking import mock
from pertinosdk import PertinoSdk, QueryBuilder, where
from mockito.mockito import when, verify
from mockito.matchers import any, Matcher
class Contains(Matcher):
def __init__(self, sub):
self.sub = sub
def matches(self, arg):
if not hasattr(arg, 'find'):
return
if not self.sub or len(self.sub) <= 0:
return
for sub in self.sub:
if not arg.find(sub) > -1:
return
return True
def __repr__(self):
return "<Contains: '%s'>" % (str(self.sub))
def setupSdk():
requests = mock()
pertinoSdk = PertinoSdk('a', 'b', requests)
response = mock()
when(requests).get(any(), auth=any()).thenReturn(response)
return pertinoSdk, requests, response
def test_CanRetrieveOrganizationListUnfiltered():
pertinoSdk, requests, response = setupSdk()
json = {"orgs": [{"name": "organization", "id": 1234}]}
when(response).json().thenReturn(json)
assert pertinoSdk.listOrgs() == json["orgs"]
verify(requests).get('http://api.labs.pertino.com:5000/api/v0-alpha/orgs?user_key=993e79924d5b6346fe62a5cf62183bc5', auth=('a', 'b'))
def test_CanRetrieveOrganizationListFiltered():
pertinoSdk, _, response = setupSdk()
json = {"orgs": [{"name": "organization", "id": 1234}]}
when(response).json().thenReturn(json)
closure = mock()
pertinoSdk.listOrgs(closure=closure.function)
verify(closure).function(json["orgs"][0])
def test_CanRetrieveDevicesListUnfiltered():
pertinoSdk, requests, response = setupSdk()
json = {"devices": [{"ipv4Address": "123.456.789.10", "hostName": "host", "id": 1234}]}
when(response).json().thenReturn(json)
assert pertinoSdk.listDevicesIn({"id":1}) == json["devices"]
verify(requests).get('http://api.labs.pertino.com:5000/api/v0-alpha/orgs/1/devices?user_key=993e79924d5b6346fe62a5cf62183bc5', auth=any())
def test_CanRetrieveDevicesListFiltered():
pertinoSdk, _, response = setupSdk()
json = {"devices": [{"ipv4Address": "123.456.789.10", "hostName": "host", "id": 1234}]}
when(response).json().thenReturn(json)
|
closure = mock()
pertinoSdk.listDevicesIn({"id":1}, closure.function)
verify(closure).function(json["devices"][0])
def test_CanDeleteMachine():
pertinoSdk, requests, response = setupSdk()
when(requests).delete(any(), auth=any()).thenReturn(response)
devices = [{"ipv4Address": "123.456.789.10", "hostName": "host", "id": 1234}]
pertinoSdk.deleteFrom
|
({"id":1}, devices)
verify(requests, times=1).delete('http://api.labs.pertino.com:5000/api/v0-alpha/orgs/1/devices/1234?user_key=993e79924d5b6346fe62a5cf62183bc5', auth=any())
def test_CanBuildClosureToFilterApiResponses():
isQueryBuilder = any(QueryBuilder)
assert isQueryBuilder.matches(where("any"))
closure = where("someField").contains("desired")
testDictionaryMatched = {"someField":"desired"}
assert closure(testDictionaryMatched)
testDictionaryNotMatched = {"someField":"nothing"}
assert not closure(testDictionaryNotMatched)
|
yephper/django
|
tests/template_tests/syntax_tests/test_exceptions.py
|
Python
|
bsd-3-clause
| 2,158
| 0.00139
|
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import Si
|
mpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
|
@setup({'exception01': "{% extends 'nonexistent' %}"})
def test_exception01(self):
"""
Raise exception for invalid template name
"""
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception01')
@setup({'exception02': '{% extends nonexistent %}'})
def test_exception02(self):
"""
Raise exception for invalid variable template name
"""
if self.engine.string_if_invalid:
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception02')
else:
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception02')
@setup(
{'exception03': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
inheritance_templates,
)
def test_exception03(self):
"""
Raise exception for extra {% extends %} tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception03')
@setup(
{'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
inheritance_templates,
)
def test_exception04(self):
"""
Raise exception for custom tags used in child with {% load %} tag in parent, not in child
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception04')
@setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
def test_exception05(self):
"""
Raise exception for block.super used in base template
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception05')
|
jaredhoberock/gotham
|
api/api.py
|
Python
|
apache-2.0
| 16,127
| 0.01637
|
#!/usr/env/bin python
# TODO
# 1. fix bug that if a script shares the same name as a material dll to load,
# the material may not be loaded correctly
# 2. fix multiple definitions of c++ -> python conversion for Spectrum
import sys
import os
import math
from libgotham import *
import inspect
def normalize(x):
length = math.sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2])
return (x[0] / length, x[1] / length, x[2] / length)
def cross(a,b):
v = (a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0])
return v
def mul(A, x):
b0 = A[ 0] * x[0] + A[ 1] * x[1] + A[ 2] * x[2] + A[ 3] * x[3]
b1 = A[ 4] * x[0] + A[ 5] * x[1] + A[ 6] * x[2] + A[ 7] * x[3]
b2 = A[ 8] * x[0] + A[ 9] * x[1] + A[10] * x[2] + A[11] * x[3]
b3 = A[12] * x[0] + A[13] * x[1] + A[14] * x[2] + A[15] * x[3]
return (b0, b1, b2, b3)
# define a class named 'PyGotham'
class PyGotham:
# standard shaderpaths
shaderpaths = ['.']
try:
shaderpaths += [os.path.join(os.environ['GOTHAMHOME'], 'shaders')]
except:
print 'Warning: $GOTHAMHOME undefined! Some shaders may not be found.'
# standard texturepaths
texturepaths = ['.']
try:
texturepaths += [os.path.join(os.environ['GOTHAMHOME'], 'textures')]
except:
print 'Warning: $GOTHAMHOME undefined! Some textures may not be found.'
# map texture aliases to texture handles
__textureMap = {}
# cache (shader, parameters) so as not to instantiate redundant shader objects
__shaderCache = {}
def __init__(self):
# by default, the subsystem is plain old Gotham
self.__subsystem = self.__createSubsystem("Gotham")
self.attribute("renderer:subsystem", "Gotham")
# include the directory containing this script
# in Python's search path
dir = os.path.dirname(inspect.getabsfile(mul))
sys.path += [dir]
def __createSubsystem(self, name, copyFrom = None):
result = None
# try to import every file in this directory
# look for the first one with a type of Gotham which matches name
dir = os.path.dirname(inspect.getabsfile(mul))
# try importing each file as a module
for file in os.listdir(dir):
fileBasename = os.path.splitext(file)[0]
try:
module = __import__(fileBasename)
if copyFrom == None:
# call the null constructor
# g = module.name()
exec "result = module." + name + "()"
else:
# call the copy constructor
# g =
|
modul
|
e.name(copyFrom)
exec "result = module." + name + "(copyFrom)"
del module
except:
pass
# stop at the first thing we were able to create
if result != None:
break;
return result
def pushMatrix(self):
return self.__subsystem.pushMatrix()
def popMatrix(self):
return self.__subsystem.popMatrix()
def translate(self, tx, ty, tz):
return self.__subsystem.translate(tx,ty,tz)
def rotate(self, degrees, rx, ry, rz):
return self.__subsystem.rotate(degrees, rx, ry, rz)
def scale(self, sx, sy, sz):
return self.__subsystem.scale(sx, sy, sz)
def getMatrix(self, m):
return self.__subsystem.getMatrix(m)
def sphere(self, cx, cy, cz, radius):
return self.__subsystem.sphere(cx, cy, cz, radius)
def pushAttributes(self):
return self.__subsystem.pushAttributes()
def popAttributes(self):
return self.__subsystem.popAttributes()
def attribute(self, name, value):
if value == False:
return self.__subsystem.attribute(name, str("false"))
elif value == True:
return self.__subsystem.attribute(name, str("true"))
else:
return self.__subsystem.attribute(name, str(value))
def getAttribute(self, name):
return self.__subsystem.getAttribute(name)
def material(self, name, *parms):
# pack parameters into a dictionary if necessary
parmDict = {}
if len(parms) > 1:
for i in range(0, len(parms), 2):
parmDict[parms[i]] = parms[i+1]
elif len(parms) == 1:
parmDict = parms[0]
# get the parameters and values into a hashable tuple
parmsTuple = tuple(zip(parmDict.keys(), parmDict.values()))
# first look in the cache
shaderHash = (name,parmsTuple).__hash__()
if self.__shaderCache.has_key(shaderHash):
# there's a hit, simply refer to the cached shader
handle = self.__shaderCache[shaderHash]
self.__subsystem.material(handle)
return True
else:
# XXX this is getting ugly
# add shaderpaths to os.path temporarily
oldpath = sys.path
sys.path += self.shaderpaths
#try:
# import the material
module = __import__(name)
# create a new material
m = module.createMaterial()
# set each parameter
for (p, val) in parmsTuple:
try:
setMethod = getattr(m, 'set_' + p)
try:
# first try to set it as if it were a 3-vector
setMethod(val[0], val[1], val[2])
except:
try:
# try a scalar instead
setMethod(val)
except:
print 'Warning: value %s for parameter %s has unknown type; material parameter left undefined.' % (val,p)
except:
print 'Warning: "%s" is not a parameter of material "%s"!' % (p, name)
# bind any dangling texture references
for member in dir(m):
handle = 0
alias = ''
try:
exec 'alias = m.%s.mAlias' % member
exec 'handle = m.%s.mHandle' % member
except:
continue;
if handle == 0 and alias != '':
# create the texture
exec 'm.%s.mHandle = self.texture(alias)' % member
del module
# send the material to the subsystem
materialHandle = self.__subsystem.material(m)
# cache the material
self.__shaderCache[shaderHash] = materialHandle
result = True
#except:
# print "Unable to find material '%s'." % name
# result = False
# restore paths
sys.path = oldpath
return result
def texture(self, *args):
# validate arguments
if len(args) != 1 and len(args) != 3:
raise ValueError, "texture() expects one (filename) or three (width,height,pixels) arguments."
if len(args) == 1:
name = args[0]
# find the file
for dir in self.texturepaths:
#fullpath = os.path.join(dir, name)
fullpath = dir + '/' + name
if os.path.exists(fullpath):
# does this texture exist?
try:
return self.__textureMap[fullpath]
except:
try:
result = self.__subsystem.texture(fullpath)
self.__textureMap[fullpath] = result
return result
except:
print "Warning: unable to load image file '%s'." % fullpath
return 0
else:
print fullpath, 'does not exist'
print "Warning: '%s' not found." % name
# return a reference to the default texture
return 0
if len(args) == 3:
# convert to a vector
pixels = args[0]
pixels = vector_float()
pixels[:] = args[2]
return self.__subsystem.texture(args[0],args[1],pixels)
def mesh(self, *args):
# validate arguments
if (len(args) != 2 and len(args) != 3) and len(args) != 4:
raise ValueError, "mesh() expects either two (points,indices), three (points,parms,indices), or four (points,parms,indices,normals) arguments."
# convert to vectors
points = args[0]
pointsvec = vector_float()
pointsvec[:] = points
if len(args) == 2:
faces = args[1]
elif len(args) == 3:
faces = args[2]
elif len(args) == 4:
faces = args[2]
# validate faces
if len(faces) == 0:
print 'mesh(): Warning: empty mesh detected.'
return
if (len(faces) % 3) != 0:
raise ValueError, "Triangle list not a multiple of 3!"
i = 0
for v in faces:
if v >= len(points) / 3:
raise ValueError, "Triangle %d refers to non-vertex!" % (i/3)
i += 1
facesvec = vector_uint()
facesvec[:] = faces
if len(args) == 2:
return self.__subsystem.mesh(p
|
atztogo/phonondb
|
phonondb/phonopy/band.py
|
Python
|
bsd-3-clause
| 7,188
| 0.001113
|
import numpy as np
import seekpath
class Band:
def __init__(self,
phonon,
num_qpoints=101):
|
self._phonon = phonon # Phonopy object
self._num_qpoints = num_qpoints
self._band = []
self._labels = None
self._connected = None
def run(self):
unitcell = self._phonon.unitcell
cell = (unitcell.get_cell(),
unitcell.get_scaled_positions(),
|
unitcell.get_atomic_numbers())
band_path = seekpath.get_path(cell)
self._set_band(band_path)
self._set_labels(band_path)
return self._run_band()
def get_band(self):
return self._phonon.get_band_structure()
def plot_band(self, plt, delta_d=0.02):
fig, ax = plt.subplots()
_, distances, frequencies, _ = self._phonon.get_band_structure()
d_shift = 0
d_point = []
special_points = []
unconnected_points = [0]
for d, f, c in zip(distances, frequencies, self._connected):
special_points.append(d[0] + d_shift)
if not c:
d_shift += delta_d
special_points.append(d[0] + d_shift)
unconnected_points.append(special_points[-2])
unconnected_points.append(special_points[-1])
plt.plot(d + d_shift, f, 'r-', linewidth=1)
special_points.append(distances[-1][-1] + d_shift)
unconnected_points.append(special_points[-1])
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
plt.ylabel('Frequency (THz)')
plt.xlabel('Wave vector')
plt.xlim(0, special_points[-1])
plt.xticks(special_points, self._labels)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_tick_params(which='both', direction='in')
ax.yaxis.set_tick_params(which='both', direction='in')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
for d in unconnected_points:
plt.axvline(x=d, linestyle='-', linewidth=1.5, color='k')
x_pairs = np.reshape(unconnected_points, (-1, 2))
x_pairs /= unconnected_points[-1]
ymin, ymax = ax.get_ylim()
for pair in x_pairs:
plt.axhline(y=0, xmin=pair[0], xmax=pair[1],
linestyle=':', linewidth=0.5, color='b')
plt.axhline(y=ymin, xmin=pair[0], xmax=pair[1],
linestyle='-', linewidth=1.5, color='k')
plt.axhline(y=ymax, xmin=pair[0], xmax=pair[1],
linestyle='-', linewidth=1.5, color='k')
fig.tight_layout()
def write_band_yaml(self):
self._phonon.write_yaml_band_structure()
def save_band(self, plt):
plt.savefig("band.png")
def _set_band(self, band_path):
point_coords = band_path['point_coords']
for path in band_path['path']:
self._append_band(point_coords[path[0]], point_coords[path[1]])
def _set_labels(self, band_path):
labels = []
prev_path = None
connected = []
point_coords = band_path['point_coords']
for path in band_path['path']:
if prev_path and prev_path[1] != path[0]:
labels.append(prev_path[1])
connected.append(False)
else:
connected.append(True)
labels.append(path[0])
prev_path = path
labels.append(prev_path[1])
for i, l in enumerate(labels):
if 'GAMMA' in l:
labels[i] = "$" + l.replace("GAMMA", "\Gamma") + "$"
elif 'SIGMA' in l:
labels[i] = "$" + l.replace("SIGMA", "\Sigma") + "$"
elif 'DELTA' in l:
labels[i] = "$" + l.replace("DELTA", "\Delta") + "$"
else:
labels[i] = "$\mathrm{%s}$" % l
self._labels = labels
self._connected = connected
def _append_band(self, q_start, q_end):
band = []
nq = self._num_qpoints
for i in range(nq):
band.append(np.array(q_start) +
(np.array(q_end) - np.array(q_start)) / (nq - 1) * i)
self._band.append(band)
def _run_band(self):
return self._phonon.set_band_structure(self._band)
if __name__ == '__main__':
import os
import sys
import yaml
from phonopy import Phonopy
from phonopy.interface.phonopy_yaml import get_unitcell_from_phonopy_yaml
from phonopy.file_IO import parse_FORCE_SETS, parse_BORN
import matplotlib
def frac2val(string):
if '/' in string:
num, denom = [float(x) for x in string.split('/')]
return num / denom
else:
return float(string)
if len(sys.argv) > 1:
cell = get_unitcell_from_phonopy_yaml(sys.argv[1])
else:
cell = get_unitcell_from_phonopy_yaml("POSCAR-unitcell.yaml")
phonon_info = yaml.load(open("phonon.yaml"))
cell = get_unitcell_from_phonopy_yaml("POSCAR-unitcell.yaml")
phonon = None
if os.path.isfile("phonopy.conf"):
with open("phonopy.conf") as f:
for line in f:
if 'PRIMITIVE_AXIS' in line:
prim_vals = [frac2val(x) for x in line.split()[2:]]
if len(prim_vals) == 9:
primitive_matrix = np.reshape(prim_vals, (3, 3))
phonon = Phonopy(cell,
phonon_info['supercell_matrix'],
primitive_matrix=primitive_matrix)
else:
print("PRIMITIVE_AXIS is something wrong.")
sys.exit(1)
break
if phonon is None:
phonon = Phonopy(cell, phonon_info['supercell_matrix'])
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
if os.path.isfile("BORN"):
with open("BORN") as f:
primitive = phonon.get_primitive()
nac_params = parse_BORN(primitive, filename="BORN")
nac_params['factor'] = 14.399652
phonon.set_nac_params(nac_params)
band = Band(phonon, num_qpoints=101)
if band.run():
band.write_band_yaml()
_, distances, frequencies, _ = band.get_band()
d_end = distances[-1][-1]
f_max = np.max(frequencies)
primitive = phonon.get_primitive()
num_atom = primitive.get_number_of_atoms()
length = num_atom ** (1.0 / 3) * 4.5
figsize_x = d_end * length
margin = 0.7
scale = 0.15
delta_d = d_end / (figsize_x - margin) * scale
matplotlib.use('Agg')
matplotlib.rcParams.update({'figure.figsize': (figsize_x, 3.1),
'font.family': 'serif'})
import matplotlib.pyplot as plt
band.plot_band(plt, delta_d=(delta_d))
band.save_band(plt)
|
lociii/jukebox_mpg123
|
jukebox_mpg123/management/commands/jukebox_mpg123.py
|
Python
|
mit
| 3,441
| 0.001453
|
# -*- coding: UTF-8 -*-
from django.core.management.base import BaseCommand
from optparse import make_option
import daemon
import daemon.pidfile
from signal import SIGTSTP, SIGTERM, SIGABRT
import sys, os, subprocess
import time
from jukebox.jukebox_core import api
class Command(BaseCommand):
daemon = None
proc = None
mpg123 = None
option_list = BaseCommand.option_list + (
make_option(
"--start",
action="store_true",
dest="start",
help="Start mpg123 playback"
),
make_option(
"--stop",
action="store_true",
dest="stop",
help="Stop mpg123 playback"
),
)
def handle(self, *args, **options):
# check if mpg123 is available
fin, fout = os.popen4(["which", "mpg123"])
self.mpg123 = fout.read().replace("\n", "")
if not len(self.mpg123):
print "mpg123 is not installed"
return
pidFile = os.path.dirname(
os.path.abspath(__file__)
) + "/../../daemon.pid"
if options["start"]:
if os.path.exists(pidFile):
print "Daemon already running, pid file exists"
return
pid = daemon.pidfile.TimeoutPIDLockFile(
pidFile,
10
)
print "Starting jukebox_mpg123 daemon..."
self.daemon = daemon.DaemonContext(
uid=os.getuid(),
gid=os.getgid(),
pidfile=pid,
working_directory=os.getcwd(),
detach_process=True,
signal_map={
SIGTSTP: self.shutdown,
SIGABRT: self.skipSong
}
)
with self.daemon:
print "Register player"
pid = int(open(pidFile).read())
players_api = api.players()
players_api.add(pid)
self.play()
elif options["stop"]:
if not os.path.exists(pidFile):
print "Daemon not running"
return
print "Stopping daemon..."
pid = int(open(pidFile).read())
os.kill(pid, SIGTSTP)
print "Unregister player " + str(pid)
players_api = api.players()
players_api.remove(pid)
else:
self.print_help("jukebox_mpg123", "help")
def play(self):
songs_api = api.songs()
while 1:
if self.proc is None:
song_instance = songs_api.getNextSong()
if not os.path.exists(song_instance.Filename):
print "File not found: %s" % song_instance.Filename
continue
print "Playing " + song_instance.Filename
|
self.proc = subprocess.Popen(
[self.mpg123, song_instance.Filename]
|
)
else:
if not self.proc.poll() is None:
self.proc = None
time.sleep(0.5)
def shutdown(self, signal, action):
if not self.proc is None:
os.kill(self.proc.pid, SIGTERM)
if not self.daemon is None:
self.daemon.close()
sys.exit(0)
def skipSong(self, signal, action):
if not self.proc is None:
os.kill(self.proc.pid, SIGTERM)
|
ohtaman/pynm
|
pynm/reinforce/bandit/thompson.py
|
Python
|
mit
| 683
| 0.001464
|
# -*- coding:utf-8 -*-
from collections import defaultdict
import numpy
class ThompsonAg
|
ent:
def __init__(self, seed=None):
self._succeeds = defaultdict(int)
self._fails = defaultdict(int)
self._np_random = numpy.random.RandomState(seed)
def choose(self, arms, features=None):
return max(arms, key=lambda arm: self._score(arm))
def _score(self, arm):
return self._np_random.beta(
self._succeeds[arm] + 0.5,
self._fails[arm] + 0.5)
def update(se
|
lf, arm, reward, arms=None, features=None):
if reward > 0:
self._succeeds[arm] += 1
else:
self._fails[arms] += 1
|
kklmn/xrt
|
xrt/backends/raycing/physconsts.py
|
Python
|
mit
| 1,189
| 0.003364
|
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementi
|
ev", "Roman Chernikov"
__date__ = "07 Jan 2016"
PI = 3.1415926535897932384626433832795
PI2 = 6.283185307179586476925286766559
SQRT2PI = PI2**0.5 # =2.5066282746310002
SQ3 = 1.7320508075688772935274463415059
SQ2 = 2**0.5 # =1.4142135623730951
SQPI = PI**0.5 # =1.7724538509055159
SIE0 = 1.602176565e-19
#E0 = 4.803e-10 # [esu]
C = 2.99792458e10 # [cm/sec]
E0 = SIE0 * C / 10
M0 = 9.109383701
|
528e-28 # [g]
SIM0 = 9.109383701528e-31
M0C2 = 0.510998928 # MeV
HPLANCK = 6.626069573e-27 # [erg*sec]
EV2ERG = 1.602176565e-12 # Energy conversion from [eV] to [erg]
K2B = 2 * PI * M0 * C**2 * 0.001 / E0 # =10.710201593926415
# EMC = SIE0 / SIM0 / C[mm]
EMC = 0.5866791802416487
SIHPLANCK = 6.626069573e-34
#SIM0 = M0 * 1e-3
SIC = C * 1e-2
FINE_STR = 1 / 137.03599976
#E2W = PI2 * SIE0 / SIH # w = E2W * E[eV]
E2W = 1519267514747457.9195337718065469
E2WC = 5067.7309392068091
R0 = 2.817940285e-5 # A
AVOGADRO = 6.02214199e23 # atoms/mol
CHeVcm = HPLANCK * C / EV2ERG # {c*h[eV*cm]} = 0.00012398419297617678
CH = CHeVcm * 1e8 # {c*h[eV*A]} = 12398.419297617678
CHBAR = CH / PI2 # {c*h/(2pi)[eV*A]} = 1973.2697177417986
|
ankanaan/chimera
|
src/chimera/gui/modules/camera.py
|
Python
|
gpl-2.0
| 11,323
| 0
|
from chimera.core.callback import callback
from chimera.core.exceptions import printException
from chimera.gui.modules.canvas import FITS, FITSCanvas
from chimera.gui.module import ChimeraGUIModule
from chimera.interfaces.camera import CameraStatus
from chimera.util.image import Image
import gtk
import glib
import gdl
import time
import urllib
import os
import threading
import copy
class ImageViewer:
def __init__(self, main):
self.main = main
self.notebook = self.main.builder.get_object("imagesNotebook")
self.notebook.append_page(gtk.Label("No images"))
self.first_image = True
def newImage(self, image):
fits = FITS(image.filename())
canvas = FITSCanvas(fits.frame)
if self.first_image:
self.notebook.remove_page(0)
self.first_image = False
tab_num = self.notebook.append_page(
canvas.window, gtk.Label(os.path.basename(image.filename())))
self.notebook.set_current_page(tab_num)
class CameraController:
def __init__(self, module):
self.module = module
self.camera = None
self.wheel = None
def setCamera(self, camera):
self.camera = camera
@callback(self.module.manager)
def exposeBegin(request):
self.module.view.exposeBegin(request)
@callback(self.module.manager)
def exposeComplete(request, status):
if status == CameraStatus.OK:
self.module.view.exposeComplete(request)
else:
self.module.view.abort()
@callback(self.module.manager)
def readoutBegin(request):
self.module.view.readoutBegin(request)
@callback(self.module.manager)
def readoutComplete(image, status):
if status == CameraStatus.OK:
self.module.view.readoutComplete(image)
else:
self.module.view.abort()
self.camera.exposeBegin += exposeBegin
self.camera.exposeComplete += exposeComplete
self.camera.readoutBegin += readoutBegin
self.camera.readoutComplete += readoutComplete
def getCamera(self):
# create a copy of Proxy to make sure multiple threads don't reuse it
return copy.copy(self.camera)
def setFilterWheel(self, wheel):
self.w
|
heel = wheel
def getWheel(self):
# transfer to current thread and return (a hacky way to reuse Proxies)
self.wheel._transferThread()
return self.wh
|
eel
def expose(self):
camera = self.getCamera()
durationSpin = self.module.builder.get_object("durationSpin")
duration = durationSpin.get_value()
framesSpin = self.module.builder.get_object("framesSpin")
frames = framesSpin.get_value()
shutterOpen = self.module.builder.get_object("shutterOpen")
if(shutterOpen.get_active()):
shutterState = "OPEN"
else:
shutterState = "CLOSE"
filters = self.module.builder.get_object(
"filtersBox").get_children()[1].get_children()
current = None
for f in filters:
if f.get_active():
current = f
filterName = current.get_label()
self.module.view.begin(duration, frames)
if self.getWheel().getFilter() != filterName:
self.module.view.beginFilterChange(filterName)
self.getWheel().setFilter(filterName)
self.module.view.endFilterChange(filterName)
try:
camera.expose({"exptime": duration,
"frames": frames,
"shutter": shutterState})
except Exception, e:
printException(e)
finally:
self.module.view.end()
def abortExposure(self):
self.getCamera().abortExposure()
self.module.view.abort()
class CameraView:
def __init__(self, module):
self.module = module
self.exposureStatusbar = self.module.builder.get_object(
"exposureStatusbar")
self.exposureLabel = self.module.builder.get_object("exposureLabel")
self.exposureProgress = self.module.builder.get_object(
"exposureProgress")
self.exposureLabel.hide()
self.exposureProgress.hide()
self.exposureProgress.set_pulse_step(0.1)
self.frames = 0
self.exptime = 0
self.currentFrame = 0
self.exposeTimer = None
self.readoutTimer = None
self.filterTimer = None
def begin(self, exptime, frames):
self.frames = frames
self.exptime = exptime
self.currentFrame = 0
def ui():
self.module.builder.get_object(
"abortExposureButton").set_sensitive(True)
self.module.builder.get_object("exposeButton").set_sensitive(False)
self.exposureLabel.set_label(
"<b>%-2d/%-2d</b>" % (self.currentFrame, self.frames))
self.exposureProgress.set_fraction(0.0)
self.exposureProgress.set_text("starting ...")
self.exposureLabel.show()
self.exposureProgress.show()
glib.idle_add(ui)
def exposeBegin(self, imageRequest):
startTime = time.time()
timeout = startTime + self.exptime
self.currentFrame += 1
def ui():
self.exposureLabel.set_label(
"<b>%-2d/%-2d</b>" % (self.currentFrame, self.frames))
self.exposureProgress.set_fraction(0.0)
def exposeTimer(startTime, timeout):
now = time.time()
if now >= timeout:
return False
counter = now - startTime
self.exposureProgress.set_fraction(counter / self.exptime)
self.exposureProgress.set_text(
"%.2f" % (self.exptime - counter))
return True
self.exposeTimer = glib.timeout_add(
100, exposeTimer, startTime, timeout)
glib.idle_add(ui)
def exposeComplete(self, imageRequest):
def ui():
self.exposureProgress.set_fraction(1.0)
self.exposureProgress.set_text("exposure complete ...")
if self.exposeTimer:
glib.source_remove(self.exposeTimer)
self.exposeTimer = 0
glib.idle_add(ui)
def readoutBegin(self, imageRequest):
def ui():
self.exposureProgress.set_text("reading out and saving ...")
def readoutTimer():
self.exposureProgress.pulse()
return True
self.readoutTimer = glib.timeout_add(100, readoutTimer)
glib.idle_add(ui)
def readoutComplete(self, image):
if self.readoutTimer:
glib.source_remove(self.readoutTimer)
self.readoutTimer = 0
def ui():
self.exposureProgress.set_fraction(1.0)
self.exposureProgress.set_text("readout and save complete ...")
url = image.http()
imageFileName = urllib.urlretrieve(
url, filename=os.path.basename(image.filename()))[0]
imageFile = Image.fromFile(imageFileName)
self.module.imageViewer.newImage(imageFile)
glib.idle_add(ui)
def end(self):
def ui():
self.exposureLabel.hide()
self.exposureProgress.hide()
self.module.builder.get_object(
"abortExposureButton").set_sensitive(False)
self.module.builder.get_object("exposeButton").set_sensitive(True)
glib.idle_add(ui)
def abort(self):
def ui():
self.exposureProgress.set_text("aborted!")
self.module.builder.get_object(
"abortExposureButton").set_sensitive(False)
self.module.builder.get_object("exposeButton").set_sensitive(True)
def abortTimer():
self.exposureLabel.hide()
self.exposureProgress.hide()
return False
glib.timeout_add(2000, abortTimer)
if self.expos
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/topology_resource_py3.py
|
Python
|
mit
| 1,570
| 0.005096
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyResource(Model):
"""The network resource topology information for the given resource group.
:param name: Name of the resource.
:type name: str
:param id: ID of the resource.
:type id: str
:param location: Resource location.
:type location: str
:param associations: Holds the associations the resource has with other
resources in the resource group.
:type associations:
list[~azure.mgmt.network.v2017_11_01.models.TopologyAss
|
ociation]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'associations': {'key': 'associations', 'type': '[TopologyAssociation]'},
}
def _
|
_init__(self, *, name: str=None, id: str=None, location: str=None, associations=None, **kwargs) -> None:
super(TopologyResource, self).__init__(**kwargs)
self.name = name
self.id = id
self.location = location
self.associations = associations
|
ngokevin/zamboni
|
apps/amo/tests/test_log.py
|
Python
|
bsd-3-clause
| 976
| 0
|
"""Tests for the activitylog."""
from datetime import datetime
from nose.tools import eq_
import amo
import amo.tests
from mkt.webapps.models import Webapp
from mkt.users.models import UserProfile
class LogTest(amo.tests.TestCase):
def setUp(self
|
):
u = UserProfile.objects.create(username='foo')
amo.set_user(u)
def test_details(self):
"""
If we get details, verify they are stored as JSON, and we get out what
we put in.
"""
a = Webapp.objects.create(name='kumar is awesome')
magic = dict(title=
|
'no', body='way!')
al = amo.log(amo.LOG.DELETE_REVIEW, 1, a, details=magic)
eq_(al.details, magic)
eq_(al._details, '{"body": "way!", "title": "no"}')
def test_created(self):
"""
Verify that we preserve the create date.
"""
al = amo.log(amo.LOG.CUSTOM_TEXT, 'hi', created=datetime(2009, 1, 1))
eq_(al.created, datetime(2009, 1, 1))
|
Droriel/python_training_mantis
|
fixture/mail.py
|
Python
|
apache-2.0
| 858
| 0.004662
|
import poplib
import email
import time
class MailHelper:
def __init__(self, app
|
):
self.app = app
def get_mail(self, username, password, s
|
ubject):
for i in range (5):
pop = poplib.POP3(self.app.config['james']['host'])
pop.user(username)
pop.pass_(password)
num = pop.stat()[0]
if num>0:
for n in range(num):
msglines = pop.retr(n+1)[1]
msgtext = "\n".join(map(lambda x: x.decode('utf-8'), msglines))
msg = email.message_from_string(msgtext)
if msg.get('Subject') == subject:
pop.dele(n+1)
pop.close()
return msg.get_payload()
pop.close()
time.sleep(3)
return None
|
openstack/neutron-classifier
|
neutron_classifier/tests/unit/cli/__test_db_classifications.py
|
Python
|
apache-2.0
| 2,667
| 0
|
# Can't be run at the moment until migration with openstack-client
# Copyright (c) 2018 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit.extensions import base as test_extensions_base
f
|
rom neutronclient.v2_0 import client
OPENSTACK_CLI_ID = "/ccf/classifications"
ASSOCS_PATH = "/ccf/classifications"
NET_ASSOC_ID = "uuid_client_foo"
class OpenstackClientTestCase(test_extensions_base.ExtensionTestCase):
def setUp(self):
super(OpenstackClientTestCase, self).setUp()
self.client = client.Client()
self.client.list_ext = mock.Mock()
self.client.create_e
|
xt = mock.Mock()
self.client.show_ext = mock.Mock()
self.client.update_ext = mock.Mock()
self.client.delete_ext = mock.Mock()
print("self.client keys: ", dir(self.client))
def test_client_url_list(self):
self.client.ListIPV4Classification(OPENSTACK_CLI_ID)
self.client.list_ext.assert_called_once_with(mock.ANY, ASSOCS_PATH,
mock.ANY)
def test_client_url_create(self):
self.client.CreateIPV4Classification(OPENSTACK_CLI_ID, {})
self.client.create_ext.assert_called_once_with(ASSOCS_PATH, mock.ANY)
def test_client_url_show(self):
self.client.ShowIPV4Classification(NET_ASSOC_ID, OPENSTACK_CLI_ID)
self.client.show_ext.assert_called_once_with(ASSOCS_PATH,
NET_ASSOC_ID)
def test_client_url_update(self):
self.client.UpdateIPV4Classification(NET_ASSOC_ID,
OPENSTACK_CLI_ID, {})
self.client.update_ext.assert_called_once_with(ASSOCS_PATH,
NET_ASSOC_ID,
mock.ANY)
def test_client_url_delete(self):
self.client.DeleteIPV4Classification(NET_ASSOC_ID, OPENSTACK_CLI_ID)
self.client.delete_ext.assert_called_once_with(ASSOCS_PATH,
NET_ASSOC_ID)
|
hzlf/openbroadcast
|
website/legacy/obp_legacy/models_legacy.py
|
Python
|
gpl-3.0
| 31,942
| 0.005134
|
# This is an auto-generated Django model module.
# You'll have to do the follow
|
ing manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class ElggCalendarEvents(models.
|
Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
calendar = models.IntegerField()
title = models.CharField(max_length=765, blank=True)
description = models.TextField()
access = models.CharField(max_length=765, blank=True)
location = models.CharField(max_length=150, blank=True)
date_start = models.IntegerField()
date_end = models.IntegerField()
class Meta:
db_table = u'elgg_calendar_events'
class ElggCcAccess(models.Model):
id = models.IntegerField(primary_key=True)
gunid = models.CharField(max_length=765, blank=True)
token = models.BigIntegerField(null=True, blank=True)
chsum = models.CharField(max_length=96, blank=True)
ext = models.CharField(max_length=384, blank=True)
type = models.CharField(max_length=60, blank=True)
parent = models.BigIntegerField(null=True, blank=True)
owner = models.IntegerField(null=True, blank=True)
ts = models.DateTimeField()
class Meta:
db_table = u'elgg_cc_access'
class ElggCcGunid(models.Model):
id = models.IntegerField(primary_key=True)
type = models.CharField(max_length=765, blank=True)
objid = models.IntegerField(null=True, blank=True)
gunid = models.CharField(max_length=765, blank=True)
class Meta:
db_table = u'elgg_cc_gunid'
class ElggCcTransport(models.Model):
id = models.IntegerField(primary_key=True)
trtoken = models.CharField(max_length=48, blank=True)
direction = models.CharField(max_length=384, blank=True)
state = models.CharField(max_length=384, blank=True)
trtype = models.CharField(max_length=384, blank=True)
lock = models.CharField(max_length=3, blank=True)
target = models.CharField(max_length=765, blank=True)
rtrtok = models.CharField(max_length=48, blank=True)
mdtrtok = models.CharField(max_length=48, blank=True)
gunid = models.BigIntegerField(null=True, blank=True)
pdtoken = models.BigIntegerField(null=True, blank=True)
url = models.CharField(max_length=765, blank=True)
localfile = models.CharField(max_length=765, blank=True)
fname = models.CharField(max_length=765, blank=True)
title = models.CharField(max_length=765, blank=True)
expectedsum = models.CharField(max_length=96, blank=True)
realsum = models.CharField(max_length=96, blank=True)
expectedsize = models.IntegerField(null=True, blank=True)
realsize = models.IntegerField(null=True, blank=True)
uid = models.IntegerField(null=True, blank=True)
errmsg = models.CharField(max_length=765, blank=True)
jobpid = models.IntegerField(null=True, blank=True)
start = models.DateTimeField()
starttime = models.IntegerField(null=True, blank=True)
class Meta:
db_table = u'elgg_cc_transport'
class ElggCmBaskets(models.Model):
userid = models.IntegerField(unique=True)
baskets = models.TextField()
updated = models.DateTimeField()
migrated = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'elgg_cm_baskets'
class ElggCmContainer(models.Model):
ident = models.IntegerField(primary_key=True)
x_ident = models.IntegerField()
body = models.TextField()
content_list = models.TextField()
container_type = models.CharField(max_length=150)
date_time = models.IntegerField()
target_duration = models.CharField(max_length=30)
duration = models.DecimalField(max_digits=14, decimal_places=4)
sub_type = models.IntegerField()
best_broadcast_segment = models.CharField(max_length=1200)
best_broadcast_daytime = models.CharField(max_length=60)
best_broadcast_weekday = models.CharField(max_length=60)
livesession_license = models.IntegerField()
played = models.IntegerField()
rotation_include = models.IntegerField()
rebroadcast_url = models.CharField(max_length=1536)
class Meta:
db_table = u'elgg_cm_container'
class ElggCmFile(models.Model):
ident = models.IntegerField(primary_key=True)
file = models.CharField(max_length=765)
x_ident = models.IntegerField()
posted = models.IntegerField()
filetype = models.CharField(max_length=240)
class Meta:
db_table = u'elgg_cm_file'
class ElggCmLog(models.Model):
ident = models.IntegerField(primary_key=True)
type = models.CharField(max_length=60)
content_ident = models.IntegerField()
action = models.CharField(max_length=180)
user_ident = models.IntegerField()
timestamp = models.IntegerField()
class Meta:
db_table = u'elgg_cm_log'
class ElggCmMaster(models.Model):
ident = models.IntegerField(primary_key=True)
owner = models.IntegerField()
editor = models.IntegerField()
lastupdater = models.IntegerField()
type = models.CharField(max_length=60)
title = models.TextField()
intro = models.TextField()
access = models.CharField(max_length=60, blank=True)
access_write = models.CharField(max_length=60)
lastupdate = models.IntegerField()
posted = models.IntegerField()
is_history = models.IntegerField()
index = models.TextField()
status = models.IntegerField()
duration = models.IntegerField()
notes = models.IntegerField()
revnumber = models.IntegerField()
locked = models.IntegerField()
locked_userident = models.IntegerField()
migrated = models.DateTimeField(null=True, blank=True)
class Meta:
db_table = u'elgg_cm_master'
class ElggCmMedias(models.Model):
id = models.IntegerField(primary_key=True)
x_id = models.IntegerField()
created = models.DateTimeField(null=True, blank=True)
updated = models.DateTimeField(null=True, blank=True)
published = models.DateTimeField(null=True, blank=True)
status = models.CharField(max_length=60, blank=True)
notes = models.TextField(blank=True)
filesize = models.IntegerField(null=True, blank=True)
fileformat = models.CharField(max_length=36, blank=True)
dataformat = models.CharField(max_length=36, blank=True)
channels = models.IntegerField(null=True, blank=True)
sample_rate = models.IntegerField(null=True, blank=True)
bitrate = models.IntegerField(null=True, blank=True)
channelmode = models.CharField(max_length=36, blank=True)
bitrate_mode = models.CharField(max_length=36, blank=True)
lossless = models.IntegerField(null=True, blank=True)
encoder_options = models.CharField(max_length=36, blank=True)
compression_ratio = models.DecimalField(null=True, max_digits=14, decimal_places=4, blank=True)
encoding = models.CharField(max_length=36, blank=True)
path = models.CharField(max_length=3072, blank=True)
sourcepath = models.CharField(max_length=3072, blank=True)
parentdirectory = models.CharField(max_length=750, blank=True)
filename = models.CharField(max_length=750, blank=True)
pipeline_status = models.IntegerField()
has_flac_default = models.IntegerField()
has_mp3_default = models.IntegerField()
has_mp3_64 = models.IntegerField()
has_mp3_128 = models.IntegerField()
has_mp3_320 = models.IntegerField()
has_peakfile = models.IntegerField()
has_peakfile_raw = models.IntegerField()
has_peakfile_mp3 = models.IntegerField()
lock = models.IntegerField()
class Meta:
db_table = u'elgg_cm_medias'
class ElggCmRelations(models.Model):
ident = models.IntegerField(primary_key=True)
c_ident_master = models.IntegerField()
c_ident_slave = models.IntegerField()
relation_type = models.IntegerField()
user_ident = models.IntegerField()
class Meta:
db_table = u'elgg_cm_relations'
class ElggCmText(models.Model):
ident = models.IntegerField(primary_key=True)
bod
|
tomachalek/kontext
|
lib/plugins/default_token_connect/backends/treq.py
|
Python
|
gpl-2.0
| 6,004
| 0.002165
|
# Copyright (c) 2018 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2018 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
import logging
import urllib
from plugins.default_token_connect.backends.cache import cached
from plugins.default_token_connect.backends import HTTPBackend
class TreqBackend(HTTPBackend):
"""
Treq args:
jazyk1:cs
jazyk2:en
hledejKde[]:ACQUIS
hledejKde[]:CORE
hledejKde[]:EUROPARL
hledejKde[]:PRESSEUROP
hledejKde[]:SUBTITLES
hledejKde[]:SYNDICATE
hledejCo:obnova
searchGo:
viceslovne:
lemma:
"""
DEFAULT_MAX_RESULT_LINES = 10
AVAIL_GROUPS = None
AVAIL_LANG_MAPPINGS = None
def __init__(self, conf, ident):
super(TreqBackend, self).__init__(conf, ident)
self._conf = conf
self.AVAIL_GROUPS = conf.get('availGroups', {})
self.AVAIL_LANG_MAPPINGS = conf.get('availTranslations', {})
@staticmethod
def _lang_from_corpname(corpname):
return corpname.split('_')[-1]
def _find_second_lang(self, corpora):
"""
Find a first language+corpus with available translations
for the pr
|
imary lang
|
uage (= corpora[0]).
"""
primary_lang = self._lang_from_corpname(corpora[0])
translations = self.AVAIL_LANG_MAPPINGS.get(primary_lang, [])
for cn in corpora[1:]:
lang = self._lang_from_corpname(cn)
if lang in translations:
return cn, lang
return None, None
def enabled_for_corpora(self, corpora):
corp1 = corpora[0]
corp2 = corpora[1] if len(corpora) > 1 else None
if corp2 is None:
return False
lang1 = self._lang_from_corpname(corp1)
lang2 = self._lang_from_corpname(corp2)
return lang1 in self.AVAIL_LANG_MAPPINGS and lang2 in self.AVAIL_LANG_MAPPINGS[lang1]
@staticmethod
def mk_api_args(lang1, lang2, groups, lemma):
multiw_flag = '1' if ' ' in lemma else '0'
lemma_flag = '0' if ' ' in lemma else '1'
groups = ','.join(groups)
return [('left', lang1), ('right', lang2), ('viceslovne', multiw_flag), ('regularni', '0'),
('lemma', lemma_flag), ('aJeA', '1'), ('hledejKde', groups), ('hledejCo', lemma),
('order', 'percDesc')]
@staticmethod
def mk_page_args(lang1, lang2, groups, lemma):
multiw_flag = '1' if ' ' in lemma else '0'
lemma_flag = '0' if ' ' in lemma else '1'
return [('jazyk1', lang1), ('jazyk2', lang2), ('viceslovne', multiw_flag), ('regularni', '0'),
('lemma', lemma_flag), ('caseInsen', '1'), ('hledejCo', lemma)] + [('hledejKde[]', g) for g in groups]
def mk_api_path(self, args):
args = ['{0}={1}'.format(k, urllib.quote(v.encode('utf-8'))) for k, v in args]
return '/api.php?api=true&' + '&'.join(args)
def find_lang_common_groups(self, lang1, lang2):
g1 = set(self.AVAIL_GROUPS.get(lang1, []))
g2 = set(self.AVAIL_GROUPS.get(lang2, []))
return g1.intersection(g2)
def mk_server_addr(self):
if self._conf.get('ssl', False):
return ('https://' + self._conf['server']).encode('utf-8')
return ('http://' + self._conf['server']).encode('utf-8')
@cached
def fetch(self, corpora, token_id, num_tokens, query_args, lang):
"""
"""
primary_lang = self._lang_from_corpname(corpora[0])
translat_corp, translat_lang = self._find_second_lang(corpora)
treq_link = None
if translat_corp and translat_lang:
common_groups = self.find_lang_common_groups(primary_lang, translat_lang)
args = dict(lang1=self.enc_val(primary_lang), lang2=self.enc_val(translat_lang),
groups=[self.enc_val(s) for s in common_groups],
**query_args)
t_args = self.mk_page_args(**args)
treq_link = (self.mk_server_addr() + '/index.php', t_args)
ta_args = self.mk_api_args(lang1=args['lang1'], lang2=args['lang2'], groups=args['groups'],
lemma=args['lemma'])
connection = self.create_connection()
try:
logging.getLogger(__name__).debug(u'Treq request args: {0}'.format(ta_args))
connection.request('GET', self.mk_api_path(ta_args))
data, status = self.process_response(connection)
data = json.loads(data)
max_items = self._conf.get('maxResultItems', self.DEFAULT_MAX_RESULT_LINES)
data['lines'] = data['lines'][:max_items]
except ValueError:
logging.getLogger(__name__).error(u'Failed to parse response: {0}'.format(data))
data = dict(sum=0, lines=[])
finally:
connection.close()
else:
data = dict(sum=0, lines=[])
return json.dumps(dict(treq_link=treq_link,
sum=data.get('sum', 0),
translations=data.get('lines', []),
primary_corp=corpora[0],
translat_corp=translat_corp)), True
|
wonder041/MegaPipeline
|
Standardize.py
|
Python
|
mit
| 264
| 0.015152
|
import sys
from Bio import SeqIO
input_file = sys.argv[1]
output_file = sys
|
.argv[2]
def Ungap(seq):
seq.seq=seq.seq.ungap('-')
return seq
output_gen = (Ungap(seq) for seq in SeqIO.parse(input_file, 'fasta'))
SeqIO.write(output_gen,ou
|
tput_file, 'fasta')
|
fachschaft-medizin-rostock/django-fsmedhro
|
exoral/migrations/0005_merge_20170331_1617.py
|
Python
|
mit
| 337
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-31 14:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('exoral', '0004_merge_20170327_0002'),
('exoral', '0003_auto_20170322_1453'),
]
operat
|
ions = [
]
|
|
palfrey/kitling
|
frontend/videos/migrations/0015_add_channel.py
|
Python
|
agpl-3.0
| 907
| 0.005513
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils import timezone
class Migration(migrations.Migration):
dependencies = [
('videos', '0014_add_enabled_and_notes'),
]
operations = [
migrations.CreateModel(
name='Channel',
fi
|
elds=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(unique=True)),
('enabled', models.BooleanField(default=True)),
('working', models.BooleanField(default=False)),
('lastRetrieved', models.DateTimeField(default=timezone.make_aware(datetime.datetime(1970,1
|
,1)))),
('notes', models.CharField(max_length=1024, null=True, blank=True)),
],
),
]
|
INCF/lib9ML
|
test/serialization_profile.py
|
Python
|
bsd-3-clause
| 1,617
| 0.001237
|
from __future__ import print_function
import shutil
import os.path
import tempfile
import cProfile
import pstats
import nineml
from nineml.utils.comprehensive_example import (
instances_of_all_types, v1_safe_docs)
from nineml.serialization import ext_to_format, format_to_serializer
format_to_ext = dict((v, k) for k, v in ext_to_format.items()) # @UndefinedVariable @IgnorePep8
print_serialized = False
printable = ('xml', 'json', 'yaml')
_tmp_dir = tempfile.mkdtemp()
def function():
for version in (1.0, 2.0):
if version == 1.0:
docs = v1_safe_docs
else:
docs = list(instances_of_all_types['NineML'].values())
for format in format_to_serializer: # @ReservedAssignment
try:
ext = format_to_ext[format]
except KeyError:
continue # ones that can't be written to file (e.g. dict)
for i, document in enumerate(docs):
doc = document.clon
|
e()
url = os.path.join(
_tmp_dir, 'test{}v{}{}'.format(i, version, ext))
nineml.write(url, doc, format=format, version=version,
indent=2)
if print_serialized and format in printable:
with open(url) as f:
print(f.read())
reread_doc = nineml.read(url, reload=True) # @UnusedVariable
s
|
hutil.rmtree(_tmp_dir)
out_file = os.path.join(os.getcwd(), 'serial_profile.out')
cProfile.run('function()', out_file)
p = pstats.Stats(out_file)
p.sort_stats('cumtime').print_stats()
|
Guts/isogeo2sig
|
StandAlone/modules/proxy_checker.py
|
Python
|
gpl-3.0
| 2,322
| 0.004737
|
# -*- coding: UTF-8 -*-
#!/usr/bin/env python
from __future__ import unicode_literals
# ------------------------------------------------------------------------------
# Name: Proxy checker
# Purpose: Just a couple of functions to check various proxy configuration
#
# Author: Julien Moura (@geojulien)
#
# Python: 2.7.x with arcpy
# Created: 10/04/2015
# Updated: 10/04/2015
#
# Licence: GPL 3
# -----------------------------------------------------------------------------
###############################################################################
########### Libraries #############
###################################
# Standard library
import urllib2
import socket
import sys
import string
import os
# 3rd party libraries
import arcpy
##############################################################
|
#################
############ Functions ############
###################################
# execfile("parameters.py")
def is_bad_proxy(pip):
"""
TO COMMENT
"""
try:
proxy_handler = urllib2.ProxyHandler({'http': pip})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent
|
', 'Mozilla/5.0')]
urllib2.install_opener(opener)
req=urllib2.Request('http://www.example.com') # change the URL to test here
sock=urllib2.urlopen(req)
except urllib2.HTTPError, e:
print 'Error code: ', e.code
return e.code
except Exception, detail:
print "ERROR:", detail
return True
return False
def main():
"""
TO COMMENT
"""
socket.setdefaulttimeout(120)
# two sample proxy IPs
proxyList = ['10.0.4.2:3128', '{0}:{1}'.format(prox, port),
'{0}://{1}:{2}@{3}:{4}'.format(protocole, user, password, prox, port)]
for currentProxy in proxyList:
if is_bad_proxy(currentProxy):
print "Bad Proxy %s" % (currentProxy)
arcpy.AddMessage("Bad Proxy")
else:
print "%s is working" % (currentProxy)
arcpy.AddMessage("is working")
###############################################################################
###### Stand alone program ########
###################################
if __name__ == '__main__':
""" standalone execution for testing """
pass
|
IllegalCactus/argument-workbench
|
querytool/search/migrations/0004_searchquery_scope.py
|
Python
|
gpl-3.0
| 437
| 0
|
# -*- coding: utf-8 -*-
f
|
rom __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('search', '0003_auto_20150321_1848'),
]
operations = [
migrations.AddField(
model_name='searchquery',
name='scope',
field=models.IntegerField(default=0),
preserve_default=True,
),
|
]
|
dseredyn/velma_planners
|
scripts/tf_pub.py
|
Python
|
gpl-2.0
| 2,492
| 0.008026
|
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer i
|
n the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may
|
be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_planners')
import rospy
import sensor_msgs.msg
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy as np
class TfPub:
def __init__(self):
pass
def spin(self):
self.br = tf.TransformBroadcaster()
rospy.sleep(1.0)
while not rospy.is_shutdown():
rot = PyKDL.Rotation.RotY( 30.0 / 180.0 * math.pi )
q = rot.GetQuaternion()
self.br.sendTransform([0, 0, 1.65], [q[0], q[1], q[2], q[3]], rospy.Time.now(), "head_kinect_link", "world")
if __name__ == '__main__':
rospy.init_node('tf_pub')
v = TfPub()
v.spin()
|
lubokkanev/cloud-system
|
console/main/command_handler/commands/simple_command.py
|
Python
|
gpl-2.0
| 107
| 0
|
from console.main.command_handler.commands.command import Command
class SimpleComm
|
and(Command
|
):
pass
|
howiworkdaily/scofield-project
|
scofield/category/admin.py
|
Python
|
bsd-3-clause
| 898
| 0.010022
|
from models import *
from forms import CategoryImageForm
from django.contrib import admin
class CategoryImageInline(admin.TabularInline):
model = CategoryImage
form
|
= CategoryImageForm
class CategoryOptions(admin.ModelAdmin):
prepopulated_
|
fields = {'slug': ('name',)}
list_display = ['name', 'slug', 'parent', 'sortorder', 'published']
inlines = [
CategoryImageInline,
]
fieldsets = (
(None, {
'fields': ('name', 'slug', 'parent', 'sortorder', 'published',)
}),
('Meta options', {
'classes': ('collapse',),
'fields': ('meta_keywords', 'meta_description', )
}),
)
class CategoryImageAdmin(admin.ModelAdmin):
model = CategoryImage
form = CategoryImageForm
admin.site.register(CategoryImage, CategoryImageAdmin)
admin.site.register(Category, CategoryOptions)
|
yaybu/touchdown
|
touchdown/config/integer.py
|
Python
|
apache-2.0
| 1,092
| 0
|
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "
|
License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
|
License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument
from . import variable
class Integer(variable.Variable):
resource_name = "integer"
default = argument.Integer()
min = argument.Integer()
max = argument.Integer()
class Set(variable.Set):
resource = Integer
def to_lines(self, value):
return [str(value)]
class Get(variable.Get):
resource = Integer
def from_lines(self, value):
return int(value[0])
argument.Integer.register_adapter(Integer, variable.VariableAsString)
|
airtimemedia/satyr
|
tests/python/gdb.py
|
Python
|
gpl-2.0
| 5,764
| 0.00399
|
#!/usr/bin/env python
import unittest
from test_helpers import *
contents = load_input_contents('../gdb_stacktraces/rhbz-803600')
threads_expected = 2
frames_expected = 227
expected_short_text = '''Thread no. 1 (5 frames)
#0 validate_row at gtktreeview.c:5699
#1 validate_visible_area at gtktreeview.c:5898
#2 gtk_tree_view_bin_expose at gtktreeview.c:4253
#3 gtk_t
|
ree_view_expose at gtktreeview.c:4955
#4 _gtk_marshal_BOOLEAN__BOXED at gtkmarshalers.c:84
'''
expected_short_text_955617 = '''Thread no. 1 (3 frames)
#10 xf86CursorSetCursor at xf86Cursor.c:333
#11 xf86CursorEnableDisableFBAccess at xf86Cursor.c:233
#12 ??
'''
class TestGdbStacktrace(BindingsTestCase):
def setUp(self):
self.trace = satyr.GdbStacktrace(contents)
def test_
|
correct_thread_count(self):
self.assertEqual(len(self.trace.threads), threads_expected)
def test_correct_frame_count(self):
self.assertEqual(frame_count(self.trace), frames_expected)
def test_dup(self):
dup = self.trace.dup()
self.assertNotEqual(id(dup.threads), id(self.trace.threads))
self.assertTrue(all(map(lambda t1, t2: t1.equals(t2), dup.threads, self.trace.threads)))
dup.threads = dup.threads[:5]
dup2 = dup.dup()
self.assertNotEqual(id(dup.threads), id(dup2.threads))
self.assertTrue(all(map(lambda t1, t2: t1.equals(t2), dup.threads, dup2.threads)))
def test_prepare_linked_list(self):
dup = self.trace.dup()
dup.threads = dup.threads[:5]
dup.normalize()
self.assertTrue(len(dup.threads) <= 5)
def test_normalize(self):
dup = self.trace.dup()
dup.normalize()
self.assertNotEqual(frame_count(dup), frame_count(self.trace))
def test_str(self):
out = str(self.trace)
self.assertTrue(('Stacktrace with %d threads' % threads_expected) in out)
def test_to_short_text(self):
self.assertEqual(self.trace.to_short_text(5), expected_short_text)
def test_bthash(self):
self.assertEqual(self.trace.get_bthash(), 'd0fcdc87161ccb093f7efeff12218321d8fd5298')
def test_crash_thread(self):
self.assertTrue(self.trace.crash_thread is self.trace.threads[1])
def test_hash(self):
self.assertHashable(self.trace)
def test_short_text_normalization(self):
contents = load_input_contents('../gdb_stacktraces/rhbz-955617')
trace = satyr.GdbStacktrace(contents)
self.assertEqual(trace.to_short_text(5), expected_short_text_955617)
class TestGdbThread(BindingsTestCase):
def setUp(self):
self.thread = satyr.GdbStacktrace(contents).threads[0]
def test_getset(self):
self.assertGetSetCorrect(self.thread, 'number', 2, 9000)
def test_equals(self):
self.assertTrue(self.thread.equals(self.thread))
dup = self.thread.dup()
self.assertTrue(self.thread.equals(dup))
dup.number = 9000
self.assertFalse(self.thread.equals(dup))
def test_duphash(self):
expected_plain = 'Thread\n write\n virNetSocketWriteWire\n virNetSocketWrite\n'
self.assertEqual(self.thread.get_duphash(flags=satyr.DUPHASH_NOHASH, frames=3), expected_plain)
self.assertEqual(self.thread.get_duphash(), '01d2a92281954a81dee9098dc4f8056ef5a5a5e1')
def test_hash(self):
self.assertHashable(self.thread)
class TestGdbSharedlib(BindingsTestCase):
def setUp(self):
self.shlib = satyr.GdbStacktrace(contents).libs[0]
def test_getset(self):
self.assertGetSetCorrect(self.shlib, 'start_address', 0x3ecd63c680, 10)
self.assertGetSetCorrect(self.shlib, 'end_address', 0x3ecd71f0f8, 20)
self.assertGetSetCorrect(self.shlib, 'symbols', satyr.SYMS_OK, satyr.SYMS_WRONG)
self.assertGetSetCorrect(self.shlib, 'soname', '/usr/lib64/libpython2.6.so.1.0', '/dev/null')
def test_hash(self):
self.assertHashable(self.shlib)
class TestGdbFrame(BindingsTestCase):
def setUp(self):
self.frame = satyr.GdbStacktrace(contents).threads[0].frames[0]
def test_str(self):
out = str(self.frame)
self.assertTrue('0x0000003ec220e48d' in out)
self.assertTrue('write' in out)
self.assertTrue('Frame #0' in out)
def test_dup(self):
dup = self.frame.dup()
self.assertEqual(dup.function_name,
self.frame.function_name)
dup.function_name = 'other'
self.assertNotEqual(dup.function_name,
self.frame.function_name)
def test_cmp(self):
dup = self.frame.dup()
self.assertTrue(dup.equals(dup))
self.assertTrue(dup.equals(self.frame))
dup.function_name = 'another'
self.assertFalse(dup.equals(self.frame))
def test_getset(self):
self.assertGetSetCorrect(self.frame, 'function_name', 'write', 'foo bar')
self.assertGetSetCorrect(self.frame, 'function_type', None, 'Maybe Integer')
self.assertGetSetCorrect(self.frame, 'number', 0, 42)
self.assertGetSetCorrect(self.frame, 'source_file', '../sysdeps/unix/syscall-template.S', 'ok.c')
self.assertGetSetCorrect(self.frame, 'source_line', 82, 1337)
self.assertGetSetCorrect(self.frame, 'signal_handler_called', False, True)
self.assertGetSetCorrect(self.frame, 'address', 0x3ec220e48d, 0x666)
self.assertGetSetCorrect(self.frame, 'address', 0x666, 4398046511104)
## 2^66, this is expected to fail
#self.assertGetSetCorrect(self.frame, 'address', 4398046511104, 73786976294838206464L)
self.assertGetSetCorrect(self.frame, 'library_name', None, 'sowhat.so')
def test_hash(self):
self.assertHashable(self.frame)
if __name__ == '__main__':
unittest.main()
|
akvo/akvo-rsr
|
akvo/rsr/migrations/0079_auto_20160620_1418.py
|
Python
|
agpl-3.0
| 1,137
| 0.004398
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
def sector_validation(apps, schema_editor):
""" Remove sector from RSR validation set """
ProjectEditorValidation = apps.get_model('rsr', 'ProjectEditorVa
|
lidation')
sector_validators = ['rsr_sector', 'rsr_sector.se
|
ctor_code', 'rsr_sector.vocabulary']
for v in sector_validators:
validation = ProjectEditorValidation.objects.filter(validation_set_id=1, validation__exact=v)
if validation:
validation.delete()
def undo_sector_validation(apps, schema_editor):
""" Remove sector from RSR validation set """
ProjectEditorValidation = apps.get_model('rsr', 'ProjectEditorValidation')
sector_validators = ['rsr_sector', 'rsr_sector.sector_code', 'rsr_sector.vocabulary']
for v in sector_validators:
ProjectEditorValidation.objects.get_or_create(validation=v, action=1, validation_set_id=1)
class Migration(migrations.Migration):
dependencies = [
('rsr', '0078_auto_20160613_1428'),
]
operations = [
migrations.RunPython(sector_validation, undo_sector_validation),
]
|
alisaifee/limits
|
tests/storage/test_memcached.py
|
Python
|
mit
| 3,218
| 0.000622
|
import time
import pymemcache.client
import pytest
from limits import RateLimitItemPerMinute, RateLimitItemPerSecond
from limits.storage import MemcachedStorage, storage_from_string
from limits.strategies import (
FixedWindowElasticExpiryRateLimiter,
FixedWindowRateLimiter,
)
from tests.utils import fixed_start
@pytest.mark.memcached
@pytest.mark.flaky
class TestMemcachedStorage:
@pytest.fixture(autouse=True)
def setup(self, memcached, memcached_cluster):
self.storage_url = "memcached://localhost:22122"
def test_init_options(self, mocker):
constructor = mocker.spy(pymemcache.client, "PooledClient")
assert storage_from_string(self.storage_url, connect_timeout=1).check()
assert constructor.call_args[1]["connect_timeout"] == 1
@fixed_start
def test_fixed_window(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerSecond(10)
start = time.time()
count = 0
while time.time() - start < 0.5 and count < 10:
assert limiter.hit(per_min)
count += 1
assert not limiter.hit(per_min)
while time.time() - start <= 1:
time.sleep(0.1)
assert limiter.hit(per_min)
@fixed_start
def test_fixed_window_cluster(self):
storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerSecond(10)
start = time.time()
count = 0
while time.time() - start < 0.5 and count < 10:
assert limiter.hit(per_min)
count += 1
assert not limiter.hit(per_min)
while time.time() - start <= 1:
time.sleep(0.1)
assert limiter.hit(per_min)
@fixed_start
def test_fixed_window_with_elastic_expiry(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowElasticExpiryRateLimiter(storage)
per_sec = RateLimitItemPerSecond(2, 2)
assert limiter.hit(per_sec)
time.sleep(1)
assert limiter.hit(per_sec)
assert not limiter.test(per_sec)
time.sleep(1)
assert not limiter.test(per_sec)
time.sleep(1)
assert limiter.test(per_sec)
@fixed_start
def test_fixed_window_with_elastic_expiry_cluster(self):
storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
limiter = FixedWindowElasticExpiryRateLimiter(storage)
per_sec = RateLimitItemPerSecond(2, 2)
assert limiter.hit(per_sec)
time.sleep(1)
assert limiter.hit(per_sec)
assert not limiter.test(per_sec)
time.sleep(1)
assert not limiter.test(per_sec)
time.sleep(1)
|
assert limiter.test(per_sec)
def test_clear(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWi
|
ndowRateLimiter(storage)
per_min = RateLimitItemPerMinute(1)
limiter.hit(per_min)
assert not limiter.hit(per_min)
limiter.clear(per_min)
assert limiter.hit(per_min)
|
birsoyo/conan
|
conans/test/generators/custom_generator_test.py
|
Python
|
mit
| 3,848
| 0.001819
|
import unittest
from conans.test.utils.tools import TestServer, TestClient
from conans.model.ref import ConanFileReference
import os
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.paths import CONANFILE, CONANFILE_TXT
from conans.util.files import load
generator = """
from conans.model import Generator
from conans.paths import BUILD_INFO
from conans import ConanFile, CMake
class MyCustom_Generator(Generator):
@property
def filename(self):
return "customfile.gen"
@property
def content(self):
return "My custom generator content"
class MyCustomGeneratorPackage(ConanFile):
name = "MyCustomGen"
version = "0.2"
"""
consumer = """
[requires]
Hello0/0.1@lasote/stable
MyCustomGen/0.2@lasote/stable
[generators]
MyCustom_Generator
"""
generator_multi = """
from conans.model import Generator
from conans.paths import BUILD_INFO
from conans import ConanFile, CMake
class MyCustomMultiGenerator(Generator):
@property
def filename(self):
return "customfile.gen"
@property
def content(self):
return {"file1.gen": "CustomContent1",
"file2.gen": "CustomContent2"}
class NoMatterTheName(ConanFile):
name = "MyCustomGen"
version = "0.2"
"""
consumer_multi = """
[requires]
MyCustomGen/0.2@lasote/stable
[generators]
MyCustomMultiGenerator
"""
class CustomGeneratorTest(unittest.TestCase):
def setUp(self):
test_server = TestServer()
self.servers = {"default": test_server}
def reuse_test(self):
conan_reference = ConanFileReference.loads("Hello0/0.1@lasote/stable")
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
client.save(files)
client.run("export . lasote/stable")
client.run("upload %s" % str(conan_reference))
gen_reference = ConanFileReference.loads("MyCustomGen/0.2@lasote/stable")
files = {CONANFILE: generator}
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
client.save(files)
client.run("export . lasote/stable")
client.run("upload %s" % str(gen_reference))
# Test local, no retrieval
files = {CONANFILE_TXT: consumer}
client.save(files, clean_first=True)
client.run("install . --build")
generated = load(os.path.join(client.current_folder, "customfile.gen"))
self.assertEqual(generated, "My custom generator content")
# Test retrieval from remote
client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
files = {CONANFILE_TXT: consumer}
client.save(files)
client.run("install . --build")
generated = load(os.path.join(client.current_folder, "customfile.gen"))
self.assertEqual(generated, "My custom generator content")
def multifile_test(self):
gen_reference = ConanFileReference.loads("MyCustomGen/0.2@lasote/stable")
client = TestClient(servers=s
|
elf.servers, users={"default": [("lasote", "mypass")]})
files = {CONANFILE: generator_multi}
client.save(files)
client.run("export . lasote/stable")
client.run("upload %s" % str(gen_reference))
# Test local, no retrieval
files = {CONANFILE_TXT: consumer_multi}
client.save(
|
files, clean_first=True)
client.run("install . --build")
self.assertIn("Generator MyCustomMultiGenerator is multifile. "
"Property 'filename' not used",
client.user_io.out)
for i in (1, 2):
generated = load(os.path.join(client.current_folder, "file%d.gen" % i))
self.assertEqual(generated, "CustomContent%d" % i)
|
openstack/dragonflow
|
dragonflow/tests/unit/test_chassis_snat_app.py
|
Python
|
apache-2.0
| 3,471
| 0
|
# Copyright (c) 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from dragonflow.controller.common import constants
from dragonflow.tests.unit import test_app_base
class TestChassisSNATApp(test_app_base.DFAppTestBase):
apps_list = ["chassis_snat"]
external_host_ip = '172.24.4.100'
def setUp(self):
cfg.CONF.set_override('external_host_ip',
self.external_host_ip,
group='df')
super(TestChassisSNATApp, self).setUp()
self.SNAT_app = self.open_flow_app.dispatcher.apps['chassis_snat']
self.SNAT_app.external_ofport = 99
def test_switch_features_handler(self):
ev = mock.Mock()
ev.msg.datapath.ofproto.OFP_VERSION = 0x04
open_flow_app = self.controller.switch_backend.open_flow_app
open_flow_app.switch_features_hand
|
ler(ev)
self.SNAT_app.add_flow_go_to_table.assert_has_calls(
[mock.call(
constants.L3_LOOKUP_TABLE,
|
constants.PRIORITY_LOW,
constants.EGRESS_NAT_TABLE,
match=mock.ANY)])
self.SNAT_app.mod_flow.assert_has_calls(
[mock.call(
inst=mock.ANY,
table_id=constants.INGRESS_CLASSIFICATION_DISPATCH_TABLE,
priority=constants.PRIORITY_DEFAULT,
match=mock.ANY),
mock.call(
inst=mock.ANY,
table_id=constants.INGRESS_NAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY),
mock.call(
inst=mock.ANY,
table_id=constants.EGRESS_NAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY),
mock.call(
actions=mock.ANY,
table_id=constants.EGRESS_SNAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY)])
def test_add_local_port(self):
self.controller.update(test_app_base.fake_local_port1)
self.SNAT_app.mod_flow.assert_has_calls(
[mock.call(
inst=mock.ANY,
table_id=constants.INGRESS_SNAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY)])
def test_remove_local_port(self):
self.controller.update(test_app_base.fake_local_port1)
self.SNAT_app.mod_flow.reset_mock()
self.controller.delete(test_app_base.fake_local_port1)
self.SNAT_app.mod_flow.assert_has_calls(
[mock.call(
command=mock.ANY,
table_id=constants.INGRESS_SNAT_TABLE,
priority=constants.PRIORITY_LOW,
match=mock.ANY)])
|
dichen001/Go4Jobs
|
JackChen/hash/299. Bulls and Cows.py
|
Python
|
gpl-3.0
| 1,587
| 0.003781
|
"""
You are playing the fol
|
lowing Bulls and Cows game with your friend: You write down a number and ask your friend to guess what the number is. Each time your friend makes a guess, you provide a hint that indicates how many digits in said guess match your secret number exactly in both digit and position (called "bulls") and how many digits match the secret number but locate in the wrong position (called "
|
cows"). Your friend will use successive guesses and hints to eventually derive the secret number.
For example:
Secret number: "1807"
Friend's guess: "7810"
Hint: 1 bull and 3 cows. (The bull is 8, the cows are 0, 1 and 7.)
Write a function to return a hint according to the secret number and friend's guess, use A to indicate the bulls and B to indicate the cows. In the above example, your function should return "1A3B".
Please note that both secret number and friend's guess may contain duplicate digits, for example:
Secret number: "1123"
Friend's guess: "0111"
In this case, the 1st 1 in friend's guess is a bull, the 2nd or 3rd 1 is a cow, and your function should return "1A1B".
You may assume that the secret number and your friend's guess only contain digits, and their lengths are always equal.
"""
class Solution(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
bulls = [c for i, c in enumerate(secret) if guess[i] == c]
total = sum(min(secret.count(c), guess.count(c)) for c in '0123456789')
return '%dA%dB' % (len(bulls), total - len(bulls))
|
elcr/muhblog
|
muhblog/utils.py
|
Python
|
mit
| 3,071
| 0
|
import re
import math as maths
from typing import Iterable, Dict
from base64 import b64encode
from flask import Response, render_template, make_response, current_app
from peewee import SelectQuery
from htmlmin.minify import html_minify
from .models import Entry
PAGE_GROUP_SIZE = 5
ENTRIES_PER_PAGE = 10
class Paginator:
query: SelectQuery
current_page: int
def __init__(self, query: SelectQuery, current_page: int) -> None:
self.query = query
self.current_page = current_page
def get_entries(self) -> Iterable[Entry]:
return self.query.paginate(self.current_page, ENTRIES_PER_PAGE) \
.iterator()
def get_total_pages(self) -> int:
return maths.ceil(self.query.count() / ENTRIES_PER_PAGE)
def has_previous_page(self) -> bool:
return self.current_page != 1
def has_next_page(self) -> bool:
return self.current_page != self.get_total_pages()
def page_number_group(self) -> Iterable[int]:
padding = PAGE_GROUP_SIZE // 2
start_page = self.current_page - padding
end_page = self.current_page + padding
total_pages = self.get_total_pages()
if start_page < 1 and end_page > total_pages:
start_
|
page = 1
end_page = total_pages
else:
if start_page < 1:
difference = 1 - start_page
start_page += difference
end_page += difference
i
|
f end_page > total_pages:
difference = end_page - total_pages
end_page -= difference
start_page -= difference
if start_page < 1:
start_page = 1
return range(start_page, end_page + 1)
_template_cache: Dict[str, str] = {}
def _get_js() -> str:
if 'js' not in _template_cache:
with current_app.open_resource('static/bundle.js') as file:
_template_cache['js'] = file.read().decode('utf-8')
return _template_cache['js']
def _get_css() -> str:
if 'css' not in _template_cache:
with current_app.open_resource('static/bundle.css') as file:
_template_cache['css'] = re.sub(
pattern=r'^.+?\*\/',
repl='',
string=file.read().decode('utf-8'),
count=1,
flags=re.DOTALL
)
return _template_cache['css']
def _get_favicon_url() -> str:
if 'favicon_url' not in _template_cache:
with current_app.open_resource('static/favicon.png') as file:
favicon_bytes = file.read()
encoded = b64encode(favicon_bytes).decode('utf-8')
_template_cache['favicon_url'] = f'data:image/png;base64,{encoded}'
return _template_cache['favicon_url']
def template_response(*args, status_code: int = 200, **kwargs) -> Response:
html = render_template(
*args,
**kwargs,
js=_get_js(),
css=_get_css(),
favicon_url=_get_favicon_url()
)
html = html_minify(html)
return make_response(html, status_code)
|
redreamality/thefuck
|
thefuck/logs.py
|
Python
|
mit
| 3,065
| 0.000327
|
# -*- encoding: utf-8 -*-
from contextlib import contextmanager
from datetime import datetime
import sys
from traceback import format_exception
import colorama
from .conf import settings
def color(color_):
"""Utility for ability to disabling colored output."""
if settings.no_colors:
return ''
else:
return color_
def exception(title, exc_info):
sys.stderr.write(
u'{warn}[WARN] {title}:{reset}\n{trace}'
u'{warn}----------------------------{reset}\n\n'.format(
warn=color(colorama.Back.RED + colorama.Fore.WHITE
+ colorama.Style.BRIGHT),
res
|
et=color(colorama.Style.RESET_ALL),
title=title,
trace=''.join(format_exception(*exc_info))))
def rule_failed(rule, exc_info):
exception('Rule {}'.format(rule.name), exc_info)
def failed(msg):
sys.stderr.write('{red}{msg}{reset}\n'.format(
msg=msg,
red=color(colorama.Fore.RED),
|
reset=color(colorama.Style.RESET_ALL)))
def show_corrected_command(corrected_command):
sys.stderr.write('{bold}{script}{reset}{side_effect}\n'.format(
script=corrected_command.script,
side_effect=' (+side effect)' if corrected_command.side_effect else '',
bold=color(colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL)))
def confirm_text(corrected_command):
sys.stderr.write(
('{clear}{bold}{script}{reset}{side_effect} '
'[{green}enter{reset}/{blue}↑{reset}/{blue}↓{reset}'
'/{red}ctrl+c{reset}]').format(
script=corrected_command.script,
side_effect=' (+side effect)' if corrected_command.side_effect else '',
clear='\033[1K\r',
bold=color(colorama.Style.BRIGHT),
green=color(colorama.Fore.GREEN),
red=color(colorama.Fore.RED),
reset=color(colorama.Style.RESET_ALL),
blue=color(colorama.Fore.BLUE)))
def debug(msg):
if settings.debug:
sys.stderr.write(u'{blue}{bold}DEBUG:{reset} {msg}\n'.format(
msg=msg,
reset=color(colorama.Style.RESET_ALL),
blue=color(colorama.Fore.BLUE),
bold=color(colorama.Style.BRIGHT)))
@contextmanager
def debug_time(msg):
started = datetime.now()
try:
yield
finally:
debug(u'{} took: {}'.format(msg, datetime.now() - started))
def how_to_configure_alias(configuration_details):
print("Seems like {bold}fuck{reset} alias isn't configured!".format(
bold=color(colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL)))
if configuration_details:
content, path = configuration_details
print(
"Please put {bold}{content}{reset} in your "
"{bold}{path}{reset}.".format(
bold=color(colorama.Style.BRIGHT),
reset=color(colorama.Style.RESET_ALL),
path=path,
content=content))
print('More details - https://github.com/nvbn/thefuck#manual-installation')
|
cdawei/digbeta
|
dchen/music/src/PLA_pop.py
|
Python
|
gpl-3.0
| 3,102
| 0.000322
|
import os
import sys
import gzip
import time
import numpy as np
import pickle as pkl
from scipy.sparse import hstack
from sklearn.metrics import roc
|
_auc_score
from models import MTC
if len(sys.argv) != 7:
print('Usage: python', sys.
|
argv[0],
'WORK_DIR DATASET C P N_SEED TRAIN_DEV(Y/N)')
sys.exit(0)
else:
work_dir = sys.argv[1]
dataset = sys.argv[2]
C = float(sys.argv[3])
p = float(sys.argv[4])
n_seed = int(sys.argv[5])
trndev = sys.argv[6]
assert trndev in ['Y', 'N']
data_dir = os.path.join(work_dir, 'data/%s/setting2' % dataset)
if trndev == 'N':
fxtrain = os.path.join(data_dir, 'X_train_pop_%d.pkl.gz' % n_seed)
fytrain = os.path.join(data_dir, 'Y_train.pkl.gz')
fytrndev = os.path.join(data_dir, 'Y_trndev.pkl.gz')
fydev = os.path.join(data_dir, 'PU_dev_%d.pkl.gz' % n_seed)
fcliques = os.path.join(data_dir, 'cliques_trndev.pkl.gz')
fprefix = 'pop-%g-%g-%g' % (n_seed, C, p)
else:
fxtrain = os.path.join(data_dir, 'X_trndev_pop_%d.pkl.gz' % n_seed)
fytrain = os.path.join(data_dir, 'Y_trndev.pkl.gz')
fytrndev = os.path.join(data_dir, 'Y.pkl.gz')
fydev = os.path.join(data_dir, 'PU_test_%d.pkl.gz' % n_seed)
fcliques = os.path.join(data_dir, 'cliques_all.pkl.gz')
fprefix = 'trndev-pop-%g-%g-%g' % (n_seed, C, p)
fmodel = os.path.join(data_dir, '%s.pkl.gz' % fprefix)
fnpy = os.path.join(data_dir, '%s.npy' % fprefix)
X_train = pkl.load(gzip.open(fxtrain, 'rb'))
Y_train = pkl.load(gzip.open(fytrain, 'rb'))
Y_train_dev = pkl.load(gzip.open(fytrndev, 'rb'))
PU_dev = pkl.load(gzip.open(fydev, 'rb'))
cliques = pkl.load(gzip.open(fcliques, 'rb'))
print('N_SEED: %g, C: %g, p: %g' % (n_seed, C, p))
print(X_train.shape, Y_train.shape)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
if os.path.exists(fmodel):
print('evaluating ...')
clf = pkl.load(gzip.open(fmodel, 'rb')) # for evaluation
else:
print('training ...')
Y = hstack([Y_train, PU_dev]).tocsc().astype(np.bool)
clf = MTC(X_train, Y, C=C, p=p, user_playlist_indices=cliques, label_feature=False)
clf.fit(njobs=1, verbose=2, fnpy=fnpy)
if clf.trained is True:
# pkl.dump(clf, gzip.open(fmodel, 'wb'))
Y_dev = Y_train_dev[:, -PU_dev.shape[1]:]
offset = Y_train_dev.shape[1] - PU_dev.shape[1]
rps = []
aucs = []
for j in range(Y_dev.shape[1]):
y1 = Y_dev[:, j].toarray().reshape(-1)
y2 = PU_dev[:, j].toarray().reshape(-1)
indices = np.where(0 == y2)[0]
y_true = y1[indices]
npos = y_true.sum()
assert npos > 0
assert npos + y2.sum() == y1.sum()
k = offset + j
u = clf.pl2u[k]
wk = clf.V[u, :] + clf.W[k, :] + clf.mu
X = X_train
y_pred = np.dot(X, wk)[indices]
sortix = np.argsort(-y_pred)
y_ = y_true[sortix]
rps.append(np.mean(y_[:npos]))
aucs.append(roc_auc_score(y_true, y_pred))
clf.metric_score = (np.mean(aucs), np.mean(rps), len(rps), Y_dev.shape[1])
pkl.dump(clf, gzip.open(fmodel, 'wb'))
print('\n%.5f, %.5f, %d / %d' % clf.metric_score)
|
rbarrois/aionotify
|
setup.py
|
Python
|
bsd-2-clause
| 1,579
| 0
|
#!/usr/bin/env python
# Copyright (c) 2016 The aionotify project
# This code is distributed under the two-clause BSD License.
import codecs
import os
import re
import sys
from setuptools import setup
root_dir = os.path.abspath(os.path.dirname(__file__))
def get_version(package_name):
version_re = re.compile(r"^__version__ = [\"']([\w_.-]+)[\"']$")
package_components = package_name.split('.')
init_path = os.path.join(root_dir, *(package_components + ['__init__.py']))
with codecs.open(init_path, 'r', 'utf-8') as f:
for line in f:
match = version_re.match(line[:-1])
if match:
return match.groups()[0]
return '0.1.0'
PACKAGE = 'aionotify'
setup(
name=PACKAGE,
version=get_version(PACKAGE),
|
description="Asyncio-powered inotify library",
author="Raphaël Barrois",
author_email="raphael.barrois+%s@polytechnique.org" % PACKAGE,
url='https://github.com/rbarrois/%s' % PACKAGE,
keywords=['asyncio', 'inotify'],
packages=[PACKAGE],
license='BSD',
setup_requires=[
],
tests_require=[
'asynctest',
],
classifiers=[
"Developmen
|
t Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Filesystems",
],
test_suite='tests',
)
|
allenlavoie/tensorflow
|
tensorflow/contrib/quantize/python/quantize_graph_test.py
|
Python
|
apache-2.0
| 11,911
| 0.009151
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for the quantize_graph graph rewriting API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import quantize_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class QuantizeGraphTest(test_util.TensorFlowTestCase):
# We have a lot of other tests that test the details of the rewrite, here we
# just the specific features of the quantize_graph API.
def _RunTestOverAllRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverTrainingRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_training_graph,
quantize_graph.experimental_create_training_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverEvalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.create_eval_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewrites(self, test_fn):
rewrite_fns = [
quantize_graph.experimental_create_training_graph,
quantize_graph.experimental_create_eval_graph,
]
for fn in rewrite_fns:
test_fn(fn)
def _RunTestOverExperimentalRewritesWithScope(self, test_fn, scope):
def with_absent_scope(fn):
def fn_with_absent_scope(*args):
fn(*args, scope=scope)
return fn_with_absent_scope
rewrite_fns = [
with_absent_scope(
quantize_graph.experimental_create_training_graph),
with_absent_scope(
quantize_graph.experimental_create_eval_graph),
]
for fn in rewrite_fns:
test_fn(fn)
def testRewrite(self):
self._RunTestOverAllRewrites(self._TestRewrite)
def _TestRewrite(self, rewrite_fn):
graph = ops.Graph()
with graph.as_default():
self._ConvLayer()
orig_variable_names = set(
[v.name for v in graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn(graph)
q_variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testDefaultGraph(self):
self._RunTestOverAllRewrites(self._TestRewrite)
def _TestDefaultGraph(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer()
orig_variable_names = set(
[v.name for v in g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
rewrite_fn()
q_variables = g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
# Ensure that variables were added.
self.assertTrue(len(orig_variable_names) < len(q_variables))
def testWithPreActivationBypass(self):
self._RunTestOverAllRewrites(self._TestWithPreActivationBypass)
def _TestWithPreActivationBypass(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer(pre_activation_bypass=True, scope='scope1')
rewrite_fn()
op_names = [op.name for op in g.get_operations()]
self.assertTrue(
any('scope1/add_quant/' in name for name in op_names))
def testWithPostActivationBypass(self):
self._RunTestOverAllRewrites(self._TestWithPostActivationBypass)
def _TestWithPostActivationBypass(self, rewrite_fn):
# Tests that the default graph is correctly used when no args are provided
# to rewrite_fn.
with ops.Graph().as_default() as g:
self._ConvLayer(post_activation_bypass=True, scope='scope1')
rewrite_fn()
op_names = [op.name for op in g.get_operations()]
self.assertTrue(any(
'scope1/post_activation_bypass_quant/' in name for name in op_names))
def testQuantDelay(self):
self._RunTestOverTrainingRewrites(self._TestQuantDelay)
def _TestQuantDelay(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
quant_delay = 100
rewrite_fn(quant_delay=quant_delay)
quant_delay_found = False
for op in g.get_operations():
# Check to see if the quant_delay is correctly set.
if 'activate_quant' in op.name and op.type == 'Const':
|
quant_delay_found = True
const_value = str(op.get_attr('value'))
self.
|
assertTrue(('int64_val: %i' % quant_delay) in const_value)
self.assertTrue(quant_delay_found)
def testWeightBits(self):
self._RunTestOverExperimentalRewrites(self._TestWeightBits)
def _TestWeightBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
weight_bits = 4
rewrite_fn(weight_bits=weight_bits)
weights_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for weights have the right bits
# set.
if 'weights_quant' in op.name and op.type == 'FakeQuantWithMinMaxVars':
weights_quant_found = True
self.assertEqual(op.get_attr('num_bits'), weight_bits)
self.assertTrue(weights_quant_found)
def testActivationBits(self):
self._RunTestOverExperimentalRewrites(self._TestActivationBits)
def _TestActivationBits(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
activation_bits = 4
rewrite_fn(activation_bits=activation_bits)
act_quant_found = False
for op in g.get_operations():
# Check to see if FakeQuant operations for activations have the right bits
# set.
act_quant_names = ['act_quant', 'conv_quant', 'add_quant']
if any(s in op.name
for s in act_quant_names) and op.type == 'FakeQuantWithMinMaxVars':
act_quant_found = True
self.assertEqual(op.get_attr('num_bits'), activation_bits)
self.assertTrue(act_quant_found)
def testTrainingQuantization(self):
self._RunTestOverTrainingRewrites(self._TestTrainingQuantization)
def _TestTrainingQuantization(self, rewrite_fn):
with ops.Graph().as_default() as g:
self._ConvLayer()
rewrite_fn()
# Ensure that FakeQuant and variable update nodes were found.
quant_found = False
assign_min_last_found = False
assign_min_ema_found = False
assign_max_last_found = False
assign_max_ema_found = False
for op in g.get_operations():
# Check that FakeQuant operations were added.
if op.type == 'FakeQuantWithMinMaxVars':
quant_found = True
# Check that update operations for the added min max variables exist in
# the graph.
if 'AssignMinLast' in op.name:
assign_min_last_found = True
elif 'AssignMinEma' in op.name:
assign_min_ema_found = True
elif 'AssignMaxLast' in op.name:
assign_max_last_found = True
elif 'AssignMaxEma' in op.name:
assign_max_ema_found = True
self.assertTrue(ass
|
justacec/bokeh
|
examples/app/selection_histogram.py
|
Python
|
bsd-3-clause
| 3,598
| 0.005281
|
''' Present a scatter plot with linked histograms on both axes.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve selection_histogram.py
at your command prompt. Then navigate to the URL
http://localhost:5006/selection_histogram
in your browser.
'''
import numpy as np
from bokeh.models import BoxSelectTool, LassoSelectTool, Paragraph
from bokeh.plotting import figure, hplot, vplot, curdoc
# create three normal population samples with different parameters
x1 = np.random.normal(loc=5.0, size=400) * 100
y1 = np.random.normal(loc=10.0, size=400) * 10
x2 = np.random.normal(loc=5.0, size=800) * 50
y2 = np.random.normal(loc=5.0, size=800) * 10
x3 = np.random.normal(loc=55.0, size=200) * 10
y3 = np.random.normal(loc=4.0, size=200) * 10
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
TOOLS="pan,wheel_zoom,box_select,lasso_select"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=600, plot_height=600, title=None, min_border=10, min_border_left=50)
r = p.scatter(x, y, size=3, color="#3A5785", alpha=0.6)
p.select(BoxSelectTool).select_every_mousemove = False
p.select(LassoSelectTool).select_every_mousemove = False
# create the horizontal histogram
hhist, hedges = np.histogram(x, bins=20)
hzeros = np.zeros(len(hedges)-1)
hmax = max(hhist)*1.1
LINE_ARGS = dict(color="#3A5785", line_color=None)
ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
y_range=(-hmax, hmax), title=None, min_border=10, min_border_left=50)
ph.xgrid.grid_line_color = None
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color="white", line_color="#3A5785")
hh1 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.5, **LINE_ARGS)
hh2 = ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, alpha=0.1, **LINE_ARGS)
# create the vertical histogram
vhist, vedges = np.histogram(y, bins=20)
vzeros = np.zeros(len(vedges)-1)
vmax = max(vhist)*1.1
th = 42 # need to adjust for toolbar height, unfortunately
pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height+th-10, x_range=(-vmax, vmax),
y_range=p.y_range, title=None, min_border=10, min_border_top=th)
pv.ygrid.grid_line_color = None
pv.xaxis.major_label_orientation = -3.14/2
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color="white", line_color="#3A5785")
vh1 = pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, alpha=0.5, **LINE_ARGS)
vh2 = pv.quad(left=0, bottom=vedges[:-1],
|
top=vedges[1:], right=vzeros, alpha=0.1, **LINE_ARGS)
pv.min_border_top = 80
pv.min_border_left = 0
ph.min_border_top = 10
|
ph.min_border_right = 10
p.min_border_right = 10
layout = vplot(hplot(p, pv), hplot(ph, Paragraph(width=200)), width=800, height=800)
curdoc().add_root(layout)
def update(attr, old, new):
inds = np.array(new['1d']['indices'])
if len(inds) == 0 or len(inds) == len(x):
hhist1, hhist2 = hzeros, hzeros
vhist1, vhist2 = vzeros, vzeros
else:
neg_inds = np.ones_like(x, dtype=np.bool)
neg_inds[inds] = False
hhist1, _ = np.histogram(x[inds], bins=hedges)
vhist1, _ = np.histogram(y[inds], bins=vedges)
hhist2, _ = np.histogram(x[neg_inds], bins=hedges)
vhist2, _ = np.histogram(y[neg_inds], bins=vedges)
hh1.data_source.data["top"] = hhist1
hh2.data_source.data["top"] = -hhist2
vh1.data_source.data["right"] = vhist1
vh2.data_source.data["right"] = -vhist2
r.data_source.on_change('selected', update)
|
swamireddy/python-cinderclient
|
cinderclient/tests/v2/test_volume_backups.py
|
Python
|
apache-2.0
| 2,562
| 0
|
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.tests import utils
from cinderclient.tests.v2 import fakes
cs = fakes.FakeClient()
class VolumeBackupsTest(utils.TestCase):
def test_create(self):
cs.backups.create('2b695faf-b963-40c8-8464-274008fbcef4')
cs.assert_called('POST', '/backups')
def test_get(self):
backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
cs.backups.get(backup_id)
cs.assert_called('GET', '/backups/%s' % backup_id)
def test_list(self):
cs.backups.list()
cs.assert_called('GET', '/backups/detail')
def test_delete(self):
b = cs.backups.list()[0]
b.delete()
cs.assert_called('DELETE',
'/backups/76a17945-3c6f-435c-975b-b5685db10b62')
cs.backups.delete('76a17945-3c6f-435c-975b-b5685db10
|
b62')
cs.assert_called('DELETE',
'/backups/76a17945-3c6f-435c-975b-b5685db10b62')
cs.backups.delete(b)
cs.assert_called('DELETE',
'/backups/76a17945-3c6f-435c-975b-b5685db10b62')
def test_restore(self):
backup_id = '76a17945-3c6f-435c-975b-b5685db10b62'
cs.restores.restore(backup_id)
cs.assert_called('POST', '/backups/%s/restore' % backup_id)
def test_record_export(self):
backup_id = '76
|
a17945-3c6f-435c-975b-b5685db10b62'
cs.backups.export_record(backup_id)
cs.assert_called('GET',
'/backups/%s/export_record' % backup_id)
def test_record_import(self):
backup_service = 'fake-backup-service'
backup_url = 'fake-backup-url'
expected_body = {'backup-record': {'backup_service': backup_service,
'backup_url': backup_url}}
cs.backups.import_record(backup_service, backup_url)
cs.assert_called('POST', '/backups/import_record', expected_body)
|
rwl/openpowersystem
|
cpsm/topology/connectivity_node.py
|
Python
|
agpl-3.0
| 2,078
| 0.006256
|
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" Connectivity nodes are points where terminals of conducting equipment are connected together with zero impedance.
"""
# <<< imports
# @generated
from cpsm.core.identified_object import IdentifiedObject
from cpsm.core.connectivity_node_container import ConnectivityNodeContainer
from google.appengine.ext import db
# >>> imports
class ConnectivityNode(IdentifiedObject):
""" Connectivity nodes are points where terminals of conducting equipment are connected together with zero impedance.
"""
# <<< connectivity_node.attributes
# @generated
# >>> connectivity_nod
|
e.attributes
# <<< connectivity_node.references
# @generated
# Virtual property. Terminals interconnect with zero impedance at a node. Measurements on a node apply to all of its terminals.
pass # terminals
# Container of this connectivity
|
node.
member_of_equipment_container = db.ReferenceProperty(ConnectivityNodeContainer,
collection_name="connectivity_nodes")
# >>> connectivity_node.references
# <<< connectivity_node.operations
# @generated
# >>> connectivity_node.operations
# EOF -------------------------------------------------------------------------
|
suutari-ai/shoop
|
shuup/admin/module_registry.py
|
Python
|
agpl-3.0
| 1,750
| 0.000571
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import six
from shuup.apps.provides import get_provide_objects
from shuup.utils.importing import cached_load, load
_registry = []
def register(module_class):
if isinstance(module_class, six.string_types):
module_class = load(module_class, "Admin Module")
_registry.append(module_class())
def discover():
for obj in get_provide_objects("admin_module"):
register(obj)
def get_admin_modules():
"""
:rtype: list[shuup.admin.base.AdminModule]
"""
if not _registry:
discover()
return iter(_registry)
def get_modules():
"""
:rtype: list[shuup.admin.base.AdminModule]
"""
get_modules_spec = cached_load("SHUUP_GET_ADMIN_MODULES_SPEC")
return get_modules_spec()
def get_module_urls():
for module in get_modules(): # pragma: no branch
for url in module.get_urls(): # pragma: no branch
yield url
@contextl
|
ib.contextmanager
def replace_modules(new_module_classes):
"""
Context manager to temporarily replace all modules with something else.
Test
|
utility, mostly.
>>> def some_test():
... with replace_modules(["foo.bar:QuuxModule"]):
... pass # do stuff
:param new_module_classes: Iterable of module classes, like you'd pass to `register`
"""
old_registry = _registry[:]
_registry[:] = []
for cls in new_module_classes:
register(cls)
try:
yield
finally:
_registry[:] = old_registry
|
chromium2014/src
|
chrome/common/extensions/docs/server2/document_renderer_test.py
|
Python
|
bsd-3-clause
| 6,119
| 0.002125
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from document_renderer import DocumentRenderer
from server_instance import ServerInstance
from test_file_system import TestFileSystem
from test_data.canned_data import CANNED_TEST_FILE_SYSTEM_DATA
class DocumentRendererUnittest(unittest.TestCase):
def setUp(self):
self._renderer = ServerInstance.ForTest(
TestFileSystem(CANNED_TEST_FILE_SYSTEM_DATA)).document_renderer
def testNothingToSubstitute(self):
document = 'hello world'
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(document, text)
self.assertEqual(['Expected a title'], warnings)
def testTitles(self):
document = '<h1>title</h1> then $(title) then another $(title)'
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(document, text)
self.assertEqual(['Found unexpected title "title"'], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual('<h1>title</h1> then title then another $(title)', text)
self.assertEqual([], warnings)
def testTocs(self):
document = ('here is a toc $(table_of_contents) '
'and another $(table_of_contents)')
expected_document = ('here is a toc <table-of-contents> and another '
'$(table_of_contents)')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
def testRefs(self):
# The references in this and subsequent tests won't actually be resolved
document = 'A ref $(ref:baz.baz_e1) here, $(ref:foo.foo_t3 ref title) there'
expected_document = ('A ref <a href=#type-baz_e1>baz.baz_e1</a> '
'here, <a href=#type-foo_t3>ref title</a> '
'there')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
def testTitleAndToc(self):
document = '<h1>title</h1> $(title) and $(table_of_contents)'
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual('<h1>title</h1> $(title) and <table-of-contents>', text)
self.asse
|
rtEqual(['Found unexpected title "title"'], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual('<h1>title</h1> title and <table-of-contents>', text)
self.assertEqual([], warnings)
def testRefInTitl
|
e(self):
document = '<h1>$(ref:baz.baz_e1 title)</h1> A $(title) was here'
expected_document_no_title = ('<h1><a href=#type-baz_e1>'
'title</a></h1> A $(title) was here')
expected_document = ('<h1><a href=#type-baz_e1>title</a></h1>'
' A title was here')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document_no_title, text)
self.assertEqual([('Found unexpected title "title"')], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
def testRefSplitAcrossLines(self):
document = 'Hello, $(ref:baz.baz_e1 world). A $(ref:foo.foo_t3\n link)'
expected_document = ('Hello, <a href=#type-baz_e1>world</a>. A <a href='
'#type-foo_t3>link</a>')
path = 'apps/some/path/to/document.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
def testInvalidRef(self):
# DocumentRenderer attempts to detect unclosed $(ref:...) tags by limiting
# how far it looks ahead. Lorem Ipsum should be long enough to trigger that.
_LOREM_IPSUM = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do '
'eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim '
'ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut '
'aliquip ex ea commodo consequat. Duis aute irure dolor in '
'reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla '
'pariatur. Excepteur sint occaecat cupidatat non proident, sunt in '
'culpa qui officia deserunt mollit anim id est laborum.')
document = ('An invalid $(ref:foo.foo_t3 a title ' + _LOREM_IPSUM +
'$(ref:baz.baz_e1) here')
expected_document = ('An invalid $(ref:foo.foo_t3 a title ' + _LOREM_IPSUM +
'<a href=#type-baz_e1>baz.baz_e1</a> here')
path = 'apps/some/path/to/document_api.html'
text, warnings = self._renderer.Render(document, path)
self.assertEqual(expected_document, text)
self.assertEqual([], warnings)
text, warnings = self._renderer.Render(document, path, render_title=True)
self.assertEqual(expected_document, text)
self.assertEqual(['Expected a title'], warnings)
if __name__ == '__main__':
unittest.main()
|
rh-marketingops/dwm
|
setup.py
|
Python
|
gpl-3.0
| 1,421
| 0.001407
|
"""
dwm package setup
"""
from __future__ import print_function
from setuptools import setup, find_packages
__version__ = '1.1.0'
def readme():
""" open readme for long_description """
try:
with open('README.md') as fle:
return fle.read()
except IOError:
return ''
setup(
name='dwm',
version=__version__,
url='https://github.com/rh-marketingops/dwm',
license='GNU General Public License',
author='Jeremiah Coleman',
tests_require=['nose', 'mongomock>=3.5.0'],
install_requires=['pymongo>=3.2.2', 'tqdm>=4.8.4'],
author_email='colemanja91@gmail.com',
description='Best practices for marketing data quality management',
long_description=readme(),
packages=find_packages(),
include_pack
|
age_data=True,
platforms='any',
test_suite='nose.collector',
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audi
|
ence :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks'
],
keywords='marketing automation data quality cleanse washing cleaning'
)
|
jsubpy/jsub
|
jsub/operation/submit.py
|
Python
|
mit
| 4,925
| 0.031675
|
import os
import logging
from jsub.util import safe_mkdir
from jsub.util import safe_rmdir
class Submit(object):
def __init__(self, manager, task_id, sub_ids=None, dry_run=False, resubmit=False):
self.__manager = manager
self.__task = self.__manager.load_task(task_id)
self.__sub_ids = sub_ids
self.__dry_run = dry_run
self.__resubmit = resubmit
self.__logger = logging.getLogger('JSUB')
if self.__sub_ids==None:
self.__sub_ids=range(len(self.__task.data['jobvar']))
self.__initialize_manager()
def __initialize_manager(self):
self.__config_mgr = self.__manager.load_config_manager()
self.__backend_mgr = self.__manager.load_backend_manager()
self.__bootstrap_mgr = self.__manager.load_bootstrap_manager()
self.__navigator_mgr = self.__manager.load_navigator_manager()
self.__context_mgr = self.__manager.load_context_manager()
self.__action_mgr = self.__manager.load_action_manager()
self.__launcher_mgr = self.__manager.load_launcher_manager()
def handle(self):
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
main_root = os.path.join(run_root, 'main')
safe_rmdir(main_root)
safe_mkdir(main_root)
self.__create_input(main_root)
self.__create_context(main_root)
self.__create_action(main_root)
self.__create_navigator(main_root)
self.__create_bootstrap(main_root)
launcher_param = self.__create_launcher(run_root)
self.__submit(launcher_param)
def __create_input(self, main_root):
content = self.__manager.load_content()
input_dir = os.path.join(main_root,'input')
try:
content.get(self.__task.data['id'], 'input', os.path.join(main_root, 'input'))
except:
safe_mkdir(input_dir)
def __create_context(self, main_root):
context_dir = os.path.join(main_root, 'context')
safe_mkdir(context_dir)
action_default = {}
for unit, param in self.__task.data['workflow'].items():
action_default[unit] = self.__action_mgr.default_config(param['type'])
navigators = self.__config_mgr.navigator()
context_format = self.__navigator_mgr.context_format(navigators)
self.__context_mgr.create_context_file(self.__task.data, action_default, context_format, context_dir)
def __create_action(self, main_root):
action_dir = os.path.join(main_root, 'action')
safe_mkdir(action_dir)
actions = set()
for unit, param in self.__task.data['workflow'].items():
actions.add(param['type'])
self.__action_mgr.create_actions(actions, action_dir)
def __create_navigator(self, main_root):
navigator_dir = os.path.join(main_root, 'navigator')
safe_mkdir(navigator_dir)
navigators = self.__config_mgr.navigator()
self.__navigator_mgr.create_navigators(navigators, navigator_dir)
def __create_bootstrap(self, main_root):
bootstrap_dir = os.path.join(main_root, 'bootstrap')
safe_mkdir(b
|
ootstrap_dir)
bootstrap = self.__config_mgr.bootstrap()
self.__bootstrap_mgr.create_bootstrap(bootstrap, bootstrap_dir)
def __create_launcher(self, run_root):
lau
|
ncher = self.__task.data['backend']['launcher']
return self.__launcher_mgr.create_launcher(launcher, run_root)
def __submit(self, launcher_param):
if self.__dry_run:
return
if self.__resubmit==False:
if self.__task.data.get('backend_job_ids') or self.__task.data.get('backend_task_id'):
self.__logger.info('This task has already been submitted to backend, rerun the command with "-r" option if you wish to delete current jobs and resubmit the task.')
return
else:
self.__logger.info('Removing submitted jobs on backend before resubmission.')
task_id = self.__task.data.get('backend_task_id')
#remove previously generated files in job folder
job_ids = self.__task.data.get('backend_job_ids')
run_root = self.__backend_mgr.get_run_root(self.__task.data['backend'], self.__task.data['id'])
job_root=os.path.join(run_root,'subjobs')
safe_rmdir(job_root)
if task_id:
self.__backend_mgr.delete_task(self.__task.data['backend'],backend_task_id = task_id)
elif job_ids:
self.__backend_mgr.delete_jobs(self.__task.data['backend'],backend_job_ids = job_ids)
result = self.__backend_mgr.submit(self.__task.data['backend'], self.__task.data['id'], launcher_param, sub_ids = self.__sub_ids)
if not type(result) is dict:
result = {}
if 'backend_job_ids' in result:
njobs = len(result['backend_job_ids'])
else:
njobs = len(result)
if njobs>0:
self.__logger.info('%d jobs successfully submitted to backend.'%(njobs))
self.__task.data.setdefault('backend_job_ids',{})
backend_job_ids=result.get('backend_job_ids',{})
backend_task_id=result.get('backend_task_id',0)
self.__task.data['backend_job_ids'].update(backend_job_ids)
self.__task.data['backend_task_id']=backend_task_id
self.__task.data['status'] = 'Submitted'
task_pool = self.__manager.load_task_pool()
task_pool.save(self.__task)
self.__logger.debug(result)
|
rogerthat-platform/rogerthat-backend
|
src/rogerthat/bizz/profile.py
|
Python
|
apache-2.0
| 48,219
| 0.003152
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import base64
import datetime
import hashlib
from httplib import HTTPException
import json
import logging
import os
import re
from types import NoneType
import types
from google.appengine.api import images, urlfetch, search
from google.appengine.api.images import composite, TOP_LEFT, BOTTOM_LEFT
from google.appengine.api.urlfetch_errors import DeadlineExceededError
from google.appengine.ext import db, deferred
import facebook
from mcfw.cache import invalidate_cache
from mcfw.properties import azzert
from mcfw.rpc import returns, arguments
from mcfw.utils import chunks
from rogerthat.bizz.friends import INVITE_ID, INVITE_FACEBOOK_FRIEND, invite, breakFriendShip, makeFriends, userCode
from rogerthat.bizz.job import run_job
from rogerthat.bizz.messaging import sendMessage
from rogerthat.bizz.session import drop_sessions_of_user
from rogerthat.bizz.system import get_identity, identity_update_response_handler
from rogerthat.bizz.user import reactivate_user_profile
from rogerthat.capi.system import identityUpdate
from rogerthat.consts import MC_DASHBOARD
from rogerthat.dal import parent_key, put_and_invalidate_cache
from rogerthat.dal.app import get_app_name_by_id, get_app_by_user, get_default_app
from rogerthat.dal.broadcast import get_broadcast_settings_flow_cache_keys_of_user
from rogerthat.dal.friend import get_friends_map
from rogerthat.dal.profile import get_avatar_by_id, get_existing_profiles_via_facebook_ids, \
get_existing_user_profiles, get_user_profile, get_profile_infos, get_profile_info, get_service_profile, \
is_trial_service, \
get_user_profiles, get_service_or_user_profile, get_deactivated_user_profile
from rogerthat.dal.service import get_default_service_identity_not_cached, get_all_service_friend_keys_query, \
get_service_identities_query, get_all_archived_service_friend_keys_query, get_friend_serviceidentity_connection, \
get_default_service_identity
from rogerthat.models import FacebookUserProfile, Avatar, ProfilePointer, ShortURL, ProfileDiscoveryResult, \
FacebookProfilePointer, FacebookDiscoveryInvite, Message, ServiceProfile, UserProfile, ServiceIdentity, ProfileInfo, \
App, \
Profile, SearchConfig, FriendServiceIdentityConnectionArchive, \
UserData, UserDataArchive, ActivationLog, ProfileHashIndex
from rogerthat.rpc import users
from rogerthat.rpc.models import Mobile
from rogerthat.rpc.rpc import logError, SKIP_ACCOUNTS
from rogerthat.rpc.service import BusinessException
from rogerthat.to.friends import FacebookRogerthatProfileMatchTO
from rogerthat.to.messaging import ButtonTO, UserMemberTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.to.system import IdentityUpdateRequestTO
from rogerthat.translations import localize, DEFAULT_LANGUAGE
from rogerthat.utils import now, urlencode, is_clean_app_user_email, get_epoch_from_datetime
from rogerthat.utils.app import get_app_id_from_app_user, create_app_user, get_human_user_from_app_user, \
get_app_user_tuple, create_app_user_by_email
from rogerthat.utils.channel import send_message
from rogerthat.utils.oauth import LinkedInClient
from rogerthat.utils.service import create_service_identity_user, remove_slash_default
from rogerthat.utils.transactions import on_trans_committed, run_in_transaction
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
CURRENT_DIR = os.path.dirname(__file__)
UNKNOWN_AVATAR_PATH = os.path.join(CURRENT_DIR, 'unknown_avatar.png')
NUNTIUZ_AVATAR_PATH = os.path.join(CURRENT_DIR, 'nuntiuz.png')
USER_INDEX = "USER_INDEX"
class FailedToBuildFacebookProfileException(BusinessException):
pass
def get_unknown_avatar():
f = open(UNKNOWN_AVATAR_PATH, "rb")
try:
return f.read()
finally:
f.close()
def get_nuntiuz_avatar():
f = open(NUNTIUZ_AVATAR_PATH, "rb")
try:
return f.read()
finally:
f.close()
UNKNOWN_AVATAR = get_unknown_avatar()
NUNTIUZ_AVATAR = get_nuntiuz_avatar()
@returns(NoneType)
@arguments(app_user=users.User)
def schedule_re_index(app_user):
# Does NOT have to be transactional, running it over and over does not harm
deferred.defer(_re_index, app_user)
def create_user_index_document(index, app_user_email, fields):
email_encoded = 'base64:' + base64.b64encode(app_user_email)
doc = search.Document(doc_id=email_encoded, fields=fields)
return index.put(doc)[0]
def delete_user_index_document(index, app_user_email):
email_encoded = 'base64:' + base64.b64encode(app_user_email)
return index.delete(email_encoded)[0]
def _re_index(app_user):
def trans():
user_profile = get_profile_info(app_user, False)
f
|
m = get_friends_map(app_user)
return user_profile, fm
user_profile, fm = db.run_in_transaction(trans)
app_user_email = app_user.email()
# delete old indexed app user if the doc_id is app_user_email (not encoded)
user_index = search.Index(name
|
=USER_INDEX)
try:
if user_index.get(app_user_email):
user_index.delete(app_user_email)
except search.InvalidRequest:
pass
if not user_profile:
logging.info("Tried to index a user who is deactivated")
delete_user_index_document(user_index, app_user_email)
return
if user_profile.isServiceIdentity:
logging.error("Tried to index a service into the USER_INDEX")
return
connections = StringIO()
for f in fm.friends:
email = f.email().encode('utf8').replace('"', '')
connections.write('@@%s@@' % email)
if '/' in email:
connections.write('@@%s@@' % email.split('/')[0])
human_user, app_id = get_app_user_tuple(app_user)
fields = [
search.TextField(name='email', value=human_user.email()),
search.TextField(name='name', value=user_profile.name),
search.TextField(name='language', value=user_profile.language),
search.TextField(name='connections', value=connections.getvalue()),
search.TextField(name='app_id', value=app_id)
]
if user_profile.profileData:
data = json.loads(user_profile.profileData)
for key, value in data.iteritems():
fields.append(search.TextField(name='pd_%s' % key.replace(' ', '_'), value=value))
create_user_index_document(user_index, app_user_email, fields)
@returns([UserDetailsTO])
@arguments(name_or_email_term=unicode, app_id=unicode)
def search_users_via_name_or_email(name_or_email_term, app_id=None):
logging.info("Looking for users with term '%s'." % name_or_email_term)
if len(name_or_email_term) < 3:
logging.info("Search term is to short. Bye bye.")
return []
name_or_email_term = name_or_email_term.replace('"', '')
if app_id:
query = search.Query(query_string='email:"%s" OR name:"%s" app_id:%s' % (name_or_email_term, name_or_email_term, app_id),
options=search.QueryOptions(returned_fields=['email', 'name', 'language', 'app_id'], limit=10))
else:
query = search.Query(query_string='email:"%s" OR name:"%s"' % (name_or_email_term, name_or_email_term),
options=search.QueryOptions(returned_fields=['email', 'name', 'language', 'app_id'], limit=10))
search_result = search.Index(name=USER_INDEX).search(query)
return [UserDetailsTO.create(email=doc.fields[0].value,
name=doc.fields[1].value,
language=doc.fields[2].value,
|
bbockelm/glideinWMS
|
frontend/glideinFrontendConfig.py
|
Python
|
bsd-3-clause
| 19,293
| 0.020577
|
import string
import os.path
import urllib
import cPickle
import copy
import sys
from glideinwms.creation.lib.matchPolicy import MatchPolicy
from glideinwms.lib import hashCrypto
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# Frontend config related classes
#
############################################################
#
# Configuration
#
############################################################
class FrontendConfig:
def __init__(self):
# set default values
# user should modify if needed
self.frontend_descript_file = "frontend.descript"
self.group_descript_file = "group.descript"
self.params_descript_file = "params.cfg"
self.attrs_descript_file = "attrs.cfg"
self.signature_descript_file = "signatures.sha1"
self.signature_type = "sha1"
self.history_file = "history.pk"
# global configuration of the module
frontendConfig=FrontendConfig()
############################################################
#
# Helper function
#
############################################################
def get_group_dir(base_dir,group_name):
return os.path.join(base_dir,"group_"+group_name)
############################################################
#
# Generic Class
# You most probably don't want to use these
#
############################################################
# loads a file or URL composed of
# NAME VAL
# and creates
# self.data[NAME]=VAL
# It also defines:
# self.config_file="name of file"
# If validate is defined, also defines
# self.hash_value
class ConfigFile:
def __init__(self,config_dir,config_file,convert_function=repr,
validate=None): # if defined, must be (hash_algo,value)
self.config_dir=config_dir
self.config_file=config_file
self.data={}
self.load(os.path.join(config_dir,config_file),convert_function,validate)
self.derive()
def open(self,fname):
if (fname[:5]=="http:") or (fname[:6]=="https:") or (fname[:4]=="ftp:"):
# one of the supported URLs
return urllib.urlopen(fname)
else:
# local file
return open(fname,"r")
def validate_func(self,data,validate,fname):
if validate is not None:
vhash=hashCrypto.get_hash(validate[0],data)
self.hash_value=vhash
if (validate[1] is not None) and (vhash!=validate[1]):
raise IOError, "Failed validation of '%s'. Hash %s computed to '%s', expected '%s'"%(fname,validate[0],vhash,validate[1])
def load(self,fname,convert_function,
validate=None): # if defined, must be (hash_algo,value)
self.data={}
fd=self.open(fname)
try:
data=fd.read()
self.validate_func(data,validate,fname)
lines=data.splitlines()
del data
for line in lines:
if line[0]=="#":
continue # comment
if len(string.strip(line))==0:
continue # empty line
self.split_func(line,convert_function)
finally:
fd.close()
def split_func(self,line,convert_function):
larr=string.split(line,None,1)
lname=larr[0]
if len(larr)==1:
lval=""
else:
lval=larr[1]
exec("self.data['%s']=%s"%(lname,convert_function(lval)))
def derive(self):
return # by default, do nothing
def __str__(self):
output = '\n'
for key in self.data.keys():
output += '%s = %s, (%s)\n' % (key, str(self.data[key]), type(self.data[key]))
return output
# load from the group subdir
class GroupConfigFile(ConfigFile):
def __init__(self,base_dir,group_name,config_file,convert_function=repr,
validate=None): # if defined, must be (hash_algo,value)
ConfigFile.__init__(self,get_group_dir(base_dir,group_name),config_file,convert_function,validate)
self.group_name=group_name
# load both the main and group subdir config file
# and join the results
# Also defines:
# self.group_hash_value, if group_validate defined
class JoinConfigFile(ConfigFile):
def __init__(self,base_dir,group_name,config_file,convert_function=repr,
main_validate=None,group_validate=None): # if defined, must be (hash_algo,value)
ConfigFile.__init__(self,base_dir,config_file,convert_function,main_validate)
self.group_name=group_name
group_obj=GroupConfigFile(base_dir,group_name,config_file,convert_function,group_validate)
if group_validate is not None:
self.group_hash_value=group_obj.hash_value
#merge by overriding whatever is found in the subdir
for k in group_obj.data.keys():
self.data[k]=group_obj.data[k]
##
|
##########################################################
#
# Configuration
#
############################################################
class FrontendDes
|
cript(ConfigFile):
def __init__(self,config_dir):
global frontendConfig
ConfigFile.__init__(self,config_dir,frontendConfig.frontend_descript_file,
repr) # convert everything in strings
class ElementDescript(GroupConfigFile):
def __init__(self,base_dir,group_name):
global frontendConfig
GroupConfigFile.__init__(self,base_dir,group_name,frontendConfig.group_descript_file,
repr) # convert everything in strings
class ParamsDescript(JoinConfigFile):
def __init__(self,base_dir,group_name):
global frontendConfig
JoinConfigFile.__init__(self,base_dir,group_name,frontendConfig.params_descript_file,
lambda s:"('%s',%s)"%tuple(s.split(None,1))) # split the array
self.const_data={}
self.expr_data={} # original string
self.expr_objs={} # compiled object
for k in self.data.keys():
type_str,val=self.data[k]
if type_str=='EXPR':
self.expr_objs[k]=compile(val,"<string>","eval")
self.expr_data[k]=val
elif type_str=='CONST':
self.const_data[k]=val
else:
raise RuntimeError, "Unknown parameter type '%s' for '%s'!"%(type_str,k)
class AttrsDescript(JoinConfigFile):
def __init__(self,base_dir,group_name):
global frontendConfig
JoinConfigFile.__init__(self,base_dir,group_name,frontendConfig.attrs_descript_file,
str) # they are already in python form
# this one is the special frontend work dir signature file
class SignatureDescript(ConfigFile):
def __init__(self,config_dir):
global frontendConfig
ConfigFile.__init__(self,config_dir,frontendConfig.signature_descript_file,
None) # Not used, redefining split_func
self.signature_type=frontendConfig.signature_type
def split_func(self,line,convert_function):
larr=string.split(line,None)
if len(larr)!=3:
raise RuntimeError, "Invalid line (expected 3 elements, found %i)"%len(larr)
self.data[larr[2]]=(larr[0],larr[1])
# this one is the generic hash descript file
class BaseSignatureDescript(ConfigFile):
def __init__(self,config_dir,signature_fname,signature_type,validate=None):
ConfigFile.__init__(self,config_dir,signature_fname,
None, # Not used, redefining split_func
validate)
self.signature_type=signature_type
def split_func(self,line,convert_function):
larr=string.split(line,None,1)
if len(larr)!=2:
raise RuntimeError, "Invalid line (expected 2 elements, found %i)"%len(larr)
lval=larr[1]
self.data[lval]=larr[0]
############################################################
#
# Processed configuration
#
############################################################
# not everything is merged
# the old element can still be accessed
class ElementMergedDescript:
def __init__(self,base_dir,group_n
|
hemmerling/codingdojo
|
src/game_of_life/python_coderetreat_berlin_2014-09/python_legacycrberlin01/gol01.py
|
Python
|
apache-2.0
| 685
| 0.013139
|
#----------------------------------------------
|
---------------------------------
# Name: module1
# Purpose:
#
# Author: Admi
|
nistrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol01:
def __init__(self):
self.isCellAlive = False
pass
def setAlive(self, width,height):
self.isCellAlive = True
return True
def isAlive(self):
return self.isCellAlive
def survives(self):
return False
if __name__ == '__main__':
pass
|
botswana-harvard/edc-death-report
|
edc_death_report/models/cause.py
|
Python
|
gpl-2.0
| 196
| 0
|
from
|
edc_base.model_mixins import BaseModel, ListModelMixin
class Cause (ListModelMixin, BaseModel):
class Meta:
ordering = ['display_ind
|
ex']
app_label = 'edc_death_report'
|
plotly/python-api
|
packages/python/plotly/plotly/validators/cone/_vsrc.py
|
Python
|
mit
| 426
| 0
|
import _plotly_utils.basevalidators
class VsrcValidator(_plotly_utils.basevali
|
dators.SrcValidator):
def __init__(self, plotly_name="vsrc", parent_name="cone", **kwargs):
super(VsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
|
**kwargs
)
|
anythingrandom/eclcli
|
eclcli/sss/v1/api_keypair.py
|
Python
|
apache-2.0
| 878
| 0.006834
|
# -*- coding: utf-8 -*-
from eclcli.common import command
from eclcli.common import exceptions
from eclcli.common import utils
from eclcli.identity import common as identity_common
from ..sssclient.common.utils import objectify
class SetAPIKeypair(command.ShowOne):
def get_parser(self, prog_name):
parser = super(SetAPIKeypair, self).get_parser(prog_name)
parser.add_argument(
'user_id',
metavar="<uuid>",
he
|
lp=(""),
)
return parser
def take_action(se
|
lf, parsed_args):
sss_client = self.app.client_manager.sss
user_id = parsed_args.user_id
keypair = sss_client.set_api_keypair(user_id)
columns = utils.get_columns(keypair)
obj = objectify(keypair)
data = utils.get_item_properties(obj, columns)
return (columns, data)
|
michaelerule/neurotools
|
graphics/pygame.py
|
Python
|
gpl-3.0
| 3,004
| 0.018642
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import print_function
'''
Collected utilities for pygame
It is difficult to write pixels directly in python.
There's some way to get a framebuffer back from Tk, but it is
cumberosme.
The module pygame supports sending pixel buffers,
which is wrapped for convneinece in this module.
example usage
import neurotools.graphics.pygame as npg
import time
import numpy as np
import pygame
K = 128
screen = npg.start(K,K,'Image data')
dt = 1
|
/20
wait_til = time.time() + dt
print('Animating..')
for i in neurotools.tools.progress_bar(range(100)):
t = time.time()
if t<wait_til: time.sleep(wait_til-t)
wait_til = t + dt
npg.draw_array(screen, np.random.rand(K,K,3))
pygame.quit()
'''
import sys
import numpy as np
try:
import pygame as pg
except:
print('pygame package is m
|
issing; it is obsolete so this is not unusual')
print('pygame graphics will not work')
pg = None
def enable_vsync():
if sys.platform != 'darwin':
return
try:
import ctypes
import ctypes.util
ogl = ctypes.cdll.LoadLibrary(ctypes.util.find_library("OpenGL"))
# set v to 1 to enable vsync, 0 to disable vsync
v = ctypes.c_int(1)
ogl.CGLSetParameter(ogl.CGLGetCurrentContext(), ctypes.c_int(222), ctypes.pointer(v))
except:
print("Unable to set vsync mode, using driver defaults")
def start(W,H,name='untitled'):
# Get things going
pg.quit()
pg.init()
enable_vsync()
window = pg.display.set_mode((W,H))
pg.display.set_caption(name)
return window
def draw_array(screen,rgbdata,doshow=True):
'''
Send array data to a PyGame window.
PyGame is BRG order which is unusual -- reorder it.
Parameters
----------
screen : object
Object returned by neurotools.graphics.pygame.start
rgbdata :
RGB image data with color values in [0,1]
'''
# Cast to int
rgbdata = np.int32(rgbdata*255)
# clip bytes to 0..255 range
rgbdata[rgbdata<0]=0
rgbdata[rgbdata>255]=255
# get color dimension
if len(rgbdata.shape)==3:
w,h,d = rgbdata.shape
else:
w,h = rgbdata.shape
d=1
# repack color data in screen format
draw = np.zeros((w,h,4),'uint8')
if d==1:
draw[...,0]=rgbdata
draw[...,1]=rgbdata
draw[...,2]=rgbdata
draw[...,3]=255 # alpha channel
if d==3:
draw[...,:3]=rgbdata[...,::-1]
draw[...,-1]=255 # alpha channel
if d==4:
draw[...,:3]=rgbdata[...,-2::-1]
draw[...,-1]=rgbdata[...,-1]
# get surface and copy data to sceeen
surface = pg.Surface((w,h))
numpy_surface = np.frombuffer(surface.get_buffer())
numpy_surface[...] = np.frombuffer(draw)
del numpy_surface
screen.blit(surface,(0,0))
if doshow:
pg.display.update()
|
h4ck3rm1k3/gcc_py_introspector
|
data/body4.py
|
Python
|
gpl-2.0
| 6,023
| 0.028889
|
#!/usr/bin/python
from body3 import *
function_decl(link='extern',srcp='eval.c:216',
body=bind_expr(
body=statement_list(
E0=decl_expr(
ftype=void_type(algn='
|
8',name='126')),
E1=decl_expr(
ft
|
ype=void_type(algn='8',name='126')),
E2=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:53',used='1',
name=identifier_node(string='need_here_doc')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
E3=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=void_type(algn='8',name='126')),
E4=cond_expr(
OP0=truth_andif_expr(
OP0=ne_expr(
OP0=var_decl(algn='32',srcp='shell.h:94',used='1',
name=identifier_node(string='interactive')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=ne_expr(
OP0=nop_expr(
OP0=component_ref(
OP0=var_decl(algn='64',srcp='input.h:89',used='1',
name=identifier_node(string='bash_input')),
OP1=field_decl(algn='32',srcp='input.h:82',
name=identifier_node(string='type'))),
ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')),
OP1=integer_cst(low='3',
ftype=integer_type(algn='32',max='29',min='28',name='17',prec='32',sign='unsigned',size='5')))),
OP1=statement_list(
E0=modify_expr(
OP0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
OP1=call_expr(
E0=nop_expr(
OP0=addr_expr(
OP0=pointer_type(algn='64'),
ftype=string_cst(string='PROMPT_COMMAND',
ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))),
ftype=pointer_type(algn='64',ptd='906',size='22')),
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=pointer_type(algn='64',ptd='9',size='22')),
ftype=pointer_type(algn='64',ptd='9',size='22')),
E1=cond_expr(
OP0=ne_expr(
OP0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
OP1=integer_cst(low='0',
ftype=pointer_type(algn='64',ptd='9',size='22')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
OP1=call_expr(
E0=var_decl(algn='64',srcp='eval.c:219',used='1',
name=identifier_node(string='command_to_execute')),
E1=nop_expr(
OP0=addr_expr(
OP0=pointer_type(algn='64'),
ftype=string_cst(string='PROMPT_COMMAND',
ftype=array_type(algn='8',domn='13067',elts='9',size='13066'))),
ftype=pointer_type(algn='64',ptd='9',size='22')),
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='10721',link='extern',name='10720',srcp='input.h:105')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126')),
E2=cond_expr(
OP0=eq_expr(
OP0=var_decl(algn='32',srcp='eval.c:51',used='1',
name=identifier_node(string='running_under_emacs')),
OP1=integer_cst(low='2',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126'))),
ftype=void_type(algn='8',name='126')),
E5=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:54',used='1',
name=identifier_node(string='current_command_line_count')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
E6=modify_expr(
OP0=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r')),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='2560',link='extern',name='12695',srcp='externs.h:104')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
E7=cond_expr(
OP0=ne_expr(
OP0=var_decl(algn='32',srcp='eval.c:53',used='1',
name=identifier_node(string='need_here_doc')),
OP1=integer_cst(low='0',
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5'))),
OP1=call_expr(
fn=addr_expr(
OP0=pointer_type(algn='64'),
ftype=function_decl(body='undefined',ftype='5191',link='extern',name='10700',srcp='input.h:104')),
ftype=void_type(algn='8',name='126')),
ftype=void_type(algn='8',name='126')),
E8=return_expr(
expr=modify_expr(
OP0=result_decl(algn='32',note='art:artificial',srcp='eval.c:216'),
OP1=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r')),
ftype=integer_type(algn='32',max='7',min='6',name='1',prec='32',sign='signed',size='5')),
ftype=void_type(algn='8',name='126'))),
ftype=void_type(algn='8',name='126'),
vars=var_decl(algn='32',srcp='eval.c:218',used='1',
name=identifier_node(string='r'))),
name=identifier_node(string='parse_command'))
|
spnow/grr
|
gui/plugins/fileview_widgets.py
|
Python
|
apache-2.0
| 7,084
| 0.005364
|
#!/usr/bin/env python
# Copyr
|
ight 2012 Google Inc. All Rights Reserved.
"""Widgets for advanced display of files."
|
""
import json
from django import http
from grr.gui import renderers
from grr.lib import utils
class HexView(renderers.TemplateRenderer):
"""Display a HexView of a file.
Internal State:
- aff4_path: The name of the aff4 object we are viewing now.
- age: The version of the AFF4 object to display.
"""
table_width = 32
total_size = 0
# The state of this widget.
state = {}
# This is the template used by the js to build the hex viewer html.
table_jquery_template = """
<script id="HexTableTemplate" type="text/x-jquery-tmpl">
<table class="monospace">
<tbody>
<tr id="hex_header" class="ui-state-default">
<th id="offset">offset</th>
<th id="data_column"></th>
</tr>
<tr>
<td id="offset_area">
<table>
</table>
</td>
<td id="hex_area">
<table>
</table>
</td>
<td id="data_area" class="data_area">
<table>
</table>
</td>
<td class='slider_area'><div id=slider></div></td>
</tr>
</tbody>
</table>
</script>
"""
layout_template = renderers.Template("""
<div id="{{unique|escape}}" style="position: absolute; top: 45px;
right: 0; bottom: 0; left: 0"></div> """ + table_jquery_template + """
<script>
$("#{{unique|escapejs}}").resize(function() {
grr.hexview.HexViewer("{{renderer|escapejs}}", "{{unique|escapejs}}",
{{this.table_width|escapejs}}, {{this.state_json|safe}});
});
$("#{{unique|escapejs}}").resize();
</script>
""")
def Layout(self, request, response):
"""Render the content of the tab or the container tabset."""
self.state["aff4_path"] = request.REQ.get("aff4_path")
self.state["age"] = request.REQ.get("age")
encoder = json.JSONEncoder()
self.state_json = encoder.encode(self.state)
return super(HexView, self).Layout(request, response)
def RenderAjax(self, request, response):
"""Return the contents of the hex viewer in JSON."""
try:
row_count = int(request.REQ.get("hex_row_count", 10))
except ValueError:
row_count = 2
try:
offset = int(request.REQ.get("offset", 0))
except ValueError:
offset = 0
encoder = json.JSONEncoder()
data = [ord(x) for x in self.ReadBuffer(
request, offset, row_count * self.table_width)]
response = dict(offset=offset, values=data)
response["total_size"] = self.total_size
return http.HttpResponse(encoder.encode(response),
content_type="text/json")
def ReadBuffer(self, request, offset, length):
"""Should be overriden by derived classes to satisfy read requests.
Args:
request: The original request object.
offset: The offset inside the file we should read from.
length: The number of bytes to return.
Returns:
An array of integers between 0 and 255 corresponding to the bytes.
"""
return [x % 255 for x in xrange(offset, offset + length)]
class TextView(renderers.TemplateRenderer):
"""Display a TextView of a file."""
# The state of this widget.
state = {}
total_size = 0
default_codec = "utf_8"
allowed_codecs = ["base64_codec", "big5", "big5hkscs", "cp037", "cp1006",
"cp1026", "cp1140", "cp1250", "cp1251", "cp1252",
"cp1253", "cp1254", "cp1255", "cp1256", "cp1257",
"cp1258", "cp424", "cp437", "cp500", "cp737",
"cp775", "cp850", "cp852", "cp855", "cp856", "cp857",
"cp860", "cp861", "cp862", "cp863", "cp864", "cp865",
"cp866", "cp869", "cp874", "cp875", "cp932", "cp949",
"cp950" "idna", "rot_13", "utf_16", "utf_16_be",
"utf_16_le", "utf_32", "utf_32_be", "utf_32_le",
"utf_7", "utf_8", "utf_8_sig", "uu_codec", "zlib_codec"]
layout_template = renderers.Template("""
<div id="{{unique|escape}}">
<div id="text_viewer">
offset <input id="text_viewer_offset" name="offset" type=text value=0 size=6>
size <input id="text_viewer_data_size" name="text_data_size"
type=text value=0 size=6>
encoding <select id="text_encoding" name="text_encoding">
{% for encoder in this.allowed_codecs %}
<option value={{encoder|escape}}>{{encoder|escape}}</option>
{% endfor %}
</select>
<div id="text_viewer_slider"></div>
<div id="text_viewer_data" total_size=0>
<div id="text_viewer_data_content" total_size=0></div>
</div>
<script>
grr.textview.TextViewer("{{renderer|escapejs}}", "{{unique|escapejs}}",
"{{this.default_codec|escapejs}}",
{{this.state_json|safe}});
</script>
</div>
</div>
""")
action_template = renderers.Template("""
<div id="text_viewer_data_content" total_size="{{this.total_size|escape}}">
{% if this.error %}
<div class="errormsg">{{this.error|escape}}</div>
{% else %}
<pre class="monospace">
{{this.data|escape}}
</pre>
{% endif %}
</div>
""")
def Layout(self, request, response):
"""Render the content of the tab or the container tabset."""
self.state["aff4_path"] = request.REQ.get("aff4_path")
self.state["age"] = request.REQ.get("age")
encoder = json.JSONEncoder()
self.state_json = encoder.encode(self.state)
return super(TextView, self).Layout(request, response)
def RenderAjax(self, request, response):
"""Return the contents of the text viewer."""
try:
self.data_size = int(request.REQ.get("data_size", 10000))
self.offset = int(request.REQ.get("offset", 0))
except ValueError:
self.error = "Invalid data_size or offset given."
return renderers.TemplateRenderer.Layout(self, request, response,
self.action_template)
text_encoding = request.REQ.get("text_encoding", self.default_codec)
try:
buf = self.ReadBuffer(request, self.offset, self.data_size)
self.data = self._Decode(text_encoding, buf)
except RuntimeError as e:
self.error = "Failed to decode: %s" % utils.SmartStr(e)
return renderers.TemplateRenderer.Layout(self, request, response,
self.action_template)
def _Decode(self, codec_name, data):
"""Decode data with the given codec name."""
if codec_name not in self.allowed_codecs:
raise RuntimeError("Invalid encoding requested.")
try:
return data.decode(codec_name, "replace")
except LookupError:
raise RuntimeError("Codec could not be found.")
except AssertionError:
raise RuntimeError("Codec failed to decode")
def ReadBuffer(self, request, offset, length):
"""Should be overriden by derived classes to satisfy read requests.
Args:
request: The original request object.
offset: The offset inside the file we should read from.
length: The number of bytes to return.
Returns:
An array of integers between 0 and 255 corresponding to the bytes.
"""
return "".join(x % 255 for x in xrange(offset, offset + length))
|
rahulunair/nova
|
nova/scheduler/filters/io_ops_filter.py
|
Python
|
apache-2.0
| 2,476
| 0
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, soft
|
ware
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
|
import nova.conf
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class IoOpsFilter(filters.BaseHostFilter):
"""Filter out hosts with too many concurrent I/O operations."""
RUN_ON_REBUILD = False
def _get_max_io_ops_per_host(self, host_state, spec_obj):
return CONF.filter_scheduler.max_io_ops_per_host
def host_passes(self, host_state, spec_obj):
"""Use information about current vm and task states collected from
compute node statistics to decide whether to filter.
"""
num_io_ops = host_state.num_io_ops
max_io_ops = self._get_max_io_ops_per_host(
host_state, spec_obj)
passes = num_io_ops < max_io_ops
if not passes:
LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
"is set to %(max_io_ops)s",
{'host_state': host_state,
'max_io_ops': max_io_ops})
return passes
class AggregateIoOpsFilter(IoOpsFilter):
"""AggregateIoOpsFilter with per-aggregate the max io operations.
Fall back to global max_io_ops_per_host if no per-aggregate setting found.
"""
def _get_max_io_ops_per_host(self, host_state, spec_obj):
max_io_ops_per_host = CONF.filter_scheduler.max_io_ops_per_host
aggregate_vals = utils.aggregate_values_from_key(
host_state,
'max_io_ops_per_host')
try:
value = utils.validate_num_values(
aggregate_vals, max_io_ops_per_host, cast_to=int)
except ValueError as e:
LOG.warning("Could not decode max_io_ops_per_host: '%s'", e)
value = max_io_ops_per_host
return value
|
Fokko/incubator-airflow
|
tests/contrib/operators/test_ecs_operator.py
|
Python
|
apache-2.0
| 11,678
| 0.001199
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import unittest
from copy import deepcopy
from parameterized import parameterized
from airflow.contrib.operators.ecs_operator import ECSOperator
from airflow.exceptions import AirflowException
from tests.compat import mock
RESPONSE_WITHOUT_FAILURES = {
"failures": [],
"tasks": [
{
"containers": [
{
"containerArn":
"arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868",
"lastStatus": "PENDING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55"
}
],
"desiredStatus": "RUNNING",
"lastStatus": "PENDING",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55",
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11"
}
]
}
class TestECSOperator(unittest.TestCase):
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def setUp(self, aws_hook_mock):
self.aws_hook_mock = aws_hook_mock
self.ecs_operator_args = {
'task_id': 'task',
'task_definition': 't',
'cluster': 'c',
'overrides': {},
'aws_conn_id': None,
'region_name': 'eu-west-1',
'group': 'group',
'placement_constraints': [{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}],
'network_configuration': {
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc'],
'subnets': ['subnet-123456ab']
}
}
}
self.ecs = ECSOperator(**self.ecs_operator_args)
def test_init(self):
self.assertEqual(self.ecs.region_name, 'eu-west-1')
self.assertEqual(self.ecs.task_definition, 't')
self.assertEqual(self.ecs.aws_conn_id, None)
self.assertEqual(self.ecs.cluster, 'c')
self.assertEqual(self.ecs.overrides, {})
self.assertEqual(self.ecs.hook, self.aws_hook_mock.return_value)
self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)
def test_template_fields_overrides(self):
self.assertEqual(self.ecs.template_fields, ('overrides',))
@parameterized.expand([
['EC2', None],
['FARGATE', None],
['EC2', {'testTagKey': 'testTagValue'}],
])
@mock.patch.object(ECSOperator, '_wait_for_task_ended')
@mock.patch.object(ECSOperator, '_check_success_task')
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def test_execute_without_failures(self, launch_type, tags, aws_hook_mock,
check_mock, wait_mock):
client_mock = aws_hook_mock.return_value.get_client_type.return_value
client_mock.run_task.return_value = RESPONSE_WITHOUT_FAILURES
ecs = ECSOperator(launch_type=launch_type, tags=tags, **self.ecs_operator_args)
ecs.execute(None)
aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs',
region_name='eu-west-1')
extend_args = {}
if launch_type == 'FARGATE':
extend_args['platformVersion'] = 'LATEST'
if tags:
extend_args['tags'] = [{'key': k, 'value': v} for (k, v) in tags.items()]
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType=launch_type,
overrides={},
startedBy=mock.ANY, # Can by 'airflow' or 'Airflow'
taskDefinition='t',
group='group',
placementConstraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
networkConfiguration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc'],
'subnets': ['subnet-123456ab']
}
},
**extend_args
)
wait_mock.assert_called_once_with()
check_mock.assert_called_once_with()
self.assertEqual(ecs.arn,
'arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55')
def test_execute_with_failures(self):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
resp_failures = deepcopy(RESPONSE_WITHOUT_FAILURES)
resp_failures['failures'].append('dummy error')
client_mock.run_task.return_value = resp_failures
with self.assertRaises(AirflowException):
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs',
region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType='EC2',
overrides={},
startedBy=mock.ANY, # Can by 'airflow' or 'Airflow'
taskDefinition='t',
group='group',
placementConstraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
networkConfiguration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc'],
'subnets': ['subnet-123456ab'],
}
}
)
def test_wait_end_tasks(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
self.ecs._wait_for_ta
|
sk_ended()
client_mock.get_waiter.assert_called_o
|
nce_with('tasks_stopped')
client_mock.get_waiter.return_value.wait.assert_called_once_with(
cluster='c', tasks=['arn'])
self.assertEqual(
sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)
def test_check_success_tasks_raises(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'lastStatus': 'STOPPED',
'exitCode': 1
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is not in success state ", str(e.exception))
self.assertIn("'name': 'foo'", str(e.exception))
self.assertIn("'lastStatus': 'STOPPED'", str(e.exception))
self.assertIn("'exitCode': 1", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_pending(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mo
|
oxpeter/small_fry
|
blastfaster.py
|
Python
|
gpl-2.0
| 3,115
| 0.006742
|
#!/usr/bin/env python
import re
import os
import sys
import math
import argparse
def count_deflines(fastafile):
"counts number of sequences are in a fasta file"
fasta_h = open(fastafile, 'rb')
counter = 0
for line in fasta_h:
if re.search('^>', line) is not None:
counter += 1
fasta_h.close()
return counter
def split_fasta(fastafile, numfiles):
"splits fastafile into numfiles even sized fastafiles"
numseqs = count_deflines(fastafile)
seqlimit = math.ceil( 1. * numseqs / numfiles ) # num seqs per split file
fasta_h = open(fastafile, 'rb')
line = ''
for f in range(numfiles):
filepref = os.path.splitext(fastafile)[0]
fasta_f = open('.'.join([filepref,str(f),'fasta']), 'w')
counter = 0
fasta_f.write(line)
for line in fasta_h:
if re.search('^>', line) is not None:
counter += 1
if counter == seqlimit:
break
fasta_f.write(line)
fasta_f.close()
fasta_h.close
def blastall(fastafile, numfiles, database, blastype='blastp'):
"does blast of split fastafiles against database"
for f in range(numfiles):
filepref = os.path.splitext(fastafile)[0]
fasta_f = '.'.join([filepref,str(f),'fasta'])
cmd = blastype + ' -db ' + database + \
' -query ' + fasta_f + \
' -outfmt 6 -out ' + filepref + '.' + str(f) + '.blastp.tsv &'
os.system(cmd)
# better tracking of this could be achieved using os.fork(), dropping the & and
# then recombining the files, but this is beyond my current abilities
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Speeds up all v all blastp search")
# input options
parser.add_argument("-I", "--input_file", type=str, help="The peptide fasta file (query file)")
parser.add_argument("-D", "--database", type=str, help="The blast database to use (target db)")
parser.add_argument("-b", "--bl
|
ast_type", type=str, default='blastp',
help="The blast algorithm to use. (d
|
efault = blastp)")
parser.add_argument("-p", "--num_threads", type=int, default=1,
help="number of threads to distribute blast over")
args = parser.parse_args()
## parse files to set the working directory for saving files
# parse input file:
fullname = os.path.realpath(args.input_file)
filename = os.path.basename(args.input_file)
filepath = os.path.dirname(os.path.realpath(args.input_file))
# parse database path:
dbfull = os.path.realpath(args.database)
# parse blast output name and dir:
filepref = os.path.splitext(fullname)[0]
print "splitting %s into %d files..." % (filename, args.num_threads)
split_fasta(fullname, args.num_threads)
print "split fasta files saved in dir: %s" % (filepath)
print "running blastp for all files"
print "results saved as %s.##.blastp.tsv" % (filepref)
blastall(fullname, args.num_threads, dbfull, blastype=args.blast_type)
|
rodxavier/open-pse-initiative
|
django_project/api/views/quotes_views.py
|
Python
|
mit
| 7,113
| 0.00478
|
import json
from datetime import datetime
from django.conf import settings
from django.core.paginator import Paginator
import requests
from rest_framework import generics
from rest_framework import views
from rest_framework.pagination import PaginationSerializer
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer, XMLRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from companies.models import Company
from quotes.models import Quote
from api.renderers import QuoteCSVRenderer
from api.serializers import QuoteSerializer
class QuoteListView(generics.ListAPIView):
"""
Returns a list of end-of-day quotes from the PSE
### Parameters
- **stocks** - A comma separated list of stock symbols
- **from_date** - Start date of end-of-day quotes. This is inclusive. **Format: YYYY-MM-DD**
- **to_date** - End date of end-of-day quotes. This is exclusive. **Format: YYYY-MM-DD**
*NOTE: All the parameters are not required. When neither `from_date` and `to_date` are provided,
the API returns the quotes from the latest available date.*
### Examples
Get the latest available end-of-day quote for a company
GET /api/quotes/?stocks=BDO
Get the latest available end-of-day quote for multiple companies
GET /api/quotes/?stocks=BDO,BPI,MBT
Get all available end-of-day quotes for all companies starting from the `from_date`
GET /api/quotes/?from_date=2014-04-07
Get all available end-of-day quotes for all companies starting until the `end_date`
GET /api/quotes/?to_date=2014-04-07
Get all available end-of-day quotes for all companies between from the `from_date`, until the `end_date`
GET /api/quotes/?from_date=2014-04-07&to_date=2014-11-11
"""
serializer_class = QuoteSerializer
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, XMLRenderer, QuoteCSVRenderer)
def get_queryset(self):
items = Quote.objects.all()
stocks = self.request.QUERY_PARAMS.get('stocks')
from_date = self.request.QUERY_PARAMS.get('from_date')
to_date = self.request.QUERY_PARAMS.get('to_date')
self.csv_filename = 'quotes_'
if stocks is not None:
stocks = stocks.split(',')
stocks = [x.upper() for x in stocks]
self.csv_filename += '_'.join(stocks) + '_'
items = items.filter(company__symbol__in=stocks)
if from_date is None and to_date is None:
latest_quote_date = Quote.objects.latest('quote_date').quote_date
self.csv_filename += latest_quote_date.strftime('%Y-%m-%d')
items = items.filter(quote_date=latest_quote_date)
|
elif from_date == to_date:
self.csv_filename += from_date
q
|
uote_date = datetime.strptime(from_date, '%Y-%m-%d')
items = items.filter(quote_date=quote_date)
else:
if from_date is not None:
self.csv_filename += 'from_' + from_date
from_date = datetime.strptime(from_date, '%Y-%m-%d')
items = items.filter(quote_date__gte=from_date)
if to_date is not None:
prefix = '_' if from_date is not None else ''
self.csv_filename += prefix + 'to_' + to_date
to_date = datetime.strptime(to_date, '%Y-%m-%d')
items = items.filter(quote_date__lt=to_date)
return items.order_by('quote_date', '-company__is_index', 'company__symbol')
def list(self, request, *args, **kwargs):
response = super(generics.ListAPIView, self).list(request, args, kwargs)
ret_format = self.request.QUERY_PARAMS.get('format')
if ret_format == 'csv':
filename = self.csv_filename + '.csv'
response['Content-Disposition'] = 'attachment; filename="{0}"'.format(filename)
return response
class TickerView(views.APIView):
"""
Provides a near-realtime endpoint for quotes
### Parameters
- **stocks** - A comma separated list of stock symbols
### Examples
Get the latest available end-of-day quote for a company
GET /api/quotes/?stocks=BPI
"""
renderer_classes = (JSONRenderer, BrowsableAPIRenderer, XMLRenderer)
def get(self, request):
r = requests.get(settings.TICKER_URL)
response = json.loads(r.content)
data = {}
items = []
stocks = self.request.QUERY_PARAMS.get('stocks')
if stocks is not None:
stocks = stocks.split(',')
stocks = [x.upper() for x in stocks]
for item in response:
if item['securitySymbol'] == 'Stock Update As of':
as_of = item['securityAlias']
as_of = datetime.strptime(as_of, '%m/%d/%Y %I:%M %p')
data['as_of'] = as_of.strftime('%Y-%m-%d %I:%M%p')
else:
quote = {}
quote['symbol'] = item['securitySymbol'].upper()
if Company.objects.filter(symbol=quote['symbol']).count() != 0:
quote['name'] = Company.objects.get(symbol=quote['symbol']).name
else:
quote['name'] = item['securityAlias'].title()
quote['percent_change'] = item['percChangeClose']
quote['price'] = item['lastTradedPrice']
quote['volume'] = item['totalVolume']
quote['indicator'] = item['indicator']
if stocks is not None:
if quote['symbol'] in stocks:
items.append(quote)
else:
items.append(quote)
data['quotes'] = items
return Response(data)
class DailyQuotesDownloadView(views.APIView):
paginate_by = 50
def get(self, request):
base_url = reverse('api_quotes_list', request=request)
page_num = self.request.QUERY_PARAMS.get('page', 1)
quote_dates = Quote.objects.order_by('-quote_date').values_list('quote_date', flat=True).distinct()
paginator = Paginator(quote_dates, self.paginate_by)
page = paginator.page(page_num)
items = []
for obj in page.object_list:
date_string = obj.strftime('%Y-%m-%d')
item = {
'quote_date': date_string,
'csv_url': self.generate_download_url(base_url, date_string, 'csv'),
'json_url': self.generate_download_url(base_url, date_string, 'json'),
'xml_url': self.generate_download_url(base_url, date_string, 'xml'),
}
items.append(item)
page.object_list = items
serializer = PaginationSerializer(instance=page, context={'request': request})
data = serializer.data
return Response(data)
def generate_download_url(self, base_url, quote_date, format_type):
return '{0}?from_date={1}&to_date={1}&format={2}'.format(base_url, quote_date, format_type)
|
ccwang002/biocloud-server-kai
|
src/users/admin.py
|
Python
|
mit
| 1,557
| 0
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from .models import EmailUser as User
from .forms import AdminUserChangeForm, UserCreationForm
@admin.register(User)
class UserAdmin(UserAdmin):
fieldsets = (
(
None,
{'fields': ('
|
email', 'password')}
),
(
_('Personal info'),
{
'fields': (
'name', 'auth_number',
),
},
|
),
(
_('Permissions'),
{
'fields': (
'verified', 'is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions',
),
},
),
(
_('Important dates'),
{'fields': ('last_login', 'date_joined')},
),
)
add_fieldsets = (
(
None, {
'classes': ('wide',),
'fields': (
'email', 'password1', 'password2',
'name', 'verified',
),
},
),
)
form = AdminUserChangeForm
add_form = UserCreationForm
list_display = ('pk', 'email', 'name', 'is_staff')
list_display_links = ('email',)
list_filter = (
'verified', 'is_active', 'is_staff', 'is_superuser', 'groups',
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ('groups', 'user_permissions',)
|
KenKundert/abraxas
|
manpage.py
|
Python
|
gpl-3.0
| 83,088
| 0.008425
|
#!/usr/bin/env python
# Abraxas Collaborative Password Utility Documentation
#
# Converts a restructured text version of the manpages to nroff.
# License {{{1
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# Imports {{{1
from docutils.core import publish_string
from docutils.writers import manpage
from textwrap import dedent
from abraxas.prefs import SEARCH_FIELDS
from abraxas.version import DATE, VERSION
# Program Manpage {{{1
PROGRAM_MANPAGE = {
'name': 'abraxas',
'sect': '1',
'contents': r"""{
=========
abraxas
=========
------------------------------
collaborative password utility
------------------------------
:Author: Kale and Ken Kundert <abraxas@nurdletech.com>
:Date: {date}
:Version: {version}
:Manual section: 1
.. :Copyright: Kale and Ken Kundert
.. :Manual group: Utilities
SYNOPSIS
========
**abraxas** [*options*] [*account*]
OPTIONS
=======
-P, --password Output the password (default if nothing else is
specified).
-N, --username Output the username.
-Q <N>, --question <N> Output the answer to security question *N*.
-A, --account-number Output the account number.
-E, --email Output the email associated with this account.
-U, --url Output the website address.
-R, --remarks Output remarks.
-i, --info Output all account information except the
secrets (the password and the answers to the
security questions).
-a, --all Same as --info except also output the password.
-q, --quiet Disable all non-essential output.
|
-c, --clipboard Write output to clipboard r
|
ather than stdout.
-t, --autotype Mimic a keyboard to send output to the active
window rather than stdout. In this case any
command line arguments that specify what to
output are ignored and the *autotype* entry
directs what is to be output.
-f <str>, --find <str> List any account that contains the given string
in its ID.
-s <str>, --search <str>
List any account that contains the given string
in {search_fields}, or its ID.
-S, --stateless Do not use master password or accounts file.
-T <template>, --template <template>
Template to use if account is not found.
-b, --default-browser Open account in the default browser.
-B <browser>, --browser <browser>
Open account in the specified browser.
-n, --notify Output messages to notifier.
-l, --list List available master passwords and templates
(only pure templates are listed, not accounts,
even though accounts can be used as templates)
-w <secs>, --wait <secs>
Wait this log before clearing the secret (use
0 to disable clearing).
--archive Archive all the secrets to
~/.config/abraxas/archive.gpg.
--changed Identify all the secrets that have changed since
last archived.
-I <GPG-ID>, --init <GPG-ID>
Initialize the master password and accounts
files in ~/.config/abraxas (but only if they do
not already exist).
-h, --help Show a brief summary of available command line
options.
DIAGNOSTICS
===========
A log file is created in ~/.config/abraxas/log (the location of this
file can be specified in the *log_file* variable in the accounts file).
DESCRIPTION
===========
Abraxas is password utility that can store or generate your passwords
and produce them from the command line. It can also be configured to
autotype your username and password into the current window so that you
can log in with a simple keystroke.
Abraxas is capable of generating two types of passwords, character based
(pass words) or word based (pass phrases). Pass phrases are generally
preferred if you have a choice, but many websites will not take them.
The benefit of pass phrases is that they are relatively easy to remember
and type, and they are very secure. The pass phrases generated by
Abraxas generally consist of four words, each word is drawn from
a dictionary of 10,000 words. Thus, even if a bad guy knew that four
lower case words were being used for your pass phrase, there are still
10,000,000,000,000,000 possible combinations for him to try (this
represents a minimum entropy of 53 bits). Using six words results in 80
bits of entropy, which meets the threshold recommended by NIST for the
most secure pass phrases. For more on this, see 'How Much Entropy is
Enough' below.
For another perspective on the attractiveness of pass phrases, see
`<http://xkcd.com/936/>`_.
Unlike password vaults, Abraxas produces a highly unpredictable password
from a master password and the name of the account for which the
password is to be used. The process is completely repeatable. If you
give the same master password and account name, you will get the same
password. As such, the passwords do not have to be saved; instead they
are regenerated on the fly.
As a password generator, Abraxas provides three important advantages
over conventional password vaults. First, it allows groups of people to
share access to accounts without having to securely share each password.
Instead, one member of the group creates a master password that is
securely shared with the group once. From then on any member of the
group can create a new account, share the name of the account, and all
members will know the password needed to access the account. The second
advantage is that it opens up the possibility of using high-quality
passwords for stealth accounts, which are accounts where you remember
the name of the account but do not store any information about even the
existence of the account on your computer. With Abraxas, you only need
to remember the name of the account and it will regenerate the password
for you. This is perfect for your TrueCrypt hidden volume password.
Finally, by securely storing a small amount of information, perhaps on
a piece of paper in your safe-deposit box, you can often recover most if
not all of your passwords even if you somehow lose your accounts file.
You can even recover passwords that were created after you created your
backup. This is because Abraxas combines the master password with some
easily recons
|
dzoep/khal
|
khal/settings/settings.py
|
Python
|
mit
| 5,410
| 0
|
# Copyright (c) 2013-2016 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM,
|
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
from configobj import ConfigObj, flatten_errors, get_extra_values, \
ConfigObjError
from validate import Validator
import xdg.BaseDirectory
from .exceptions import InvalidSettingsError, CannotPars
|
eConfigFileError
from khal import __productname__
from ..log import logger
from .utils import is_timezone, weeknumber_option, config_checks, \
expand_path, expand_db_path, is_color
SPECPATH = os.path.join(os.path.dirname(__file__), 'khal.spec')
def _find_configuration_file():
"""Return the configuration filename.
This function builds the list of paths known by khal and
then return the first one which exists. The first paths
searched are the ones described in the XDG Base Directory
Standard. Each one of this path ends with
DEFAULT_PATH/DEFAULT_FILE.
On failure, the path DEFAULT_PATH/DEFAULT_FILE, prefixed with
a dot, is searched in the home user directory. Ultimately,
DEFAULT_FILE is searched in the current directory.
"""
DEFAULT_FILE = __productname__ + '.conf'
DEFAULT_PATH = __productname__
resource = os.path.join(DEFAULT_PATH, DEFAULT_FILE)
paths = []
paths.extend([os.path.join(path, resource)
for path in xdg.BaseDirectory.xdg_config_dirs])
paths.append(os.path.expanduser(os.path.join('~', '.' + resource)))
paths.append(os.path.expanduser(DEFAULT_FILE))
for path in paths:
if os.path.exists(path):
return path
return None
def get_config(config_path=None):
"""reads the config file, validates it and return a config dict
:param config_path: path to a custom config file, if none is given the
default locations will be searched
:type config_path: str
:returns: configuration
:rtype: dict
"""
if config_path is None:
config_path = _find_configuration_file()
logger.debug('using the config file at {}'.format(config_path))
try:
user_config = ConfigObj(config_path,
configspec=SPECPATH,
interpolation=False,
file_error=True,
)
except ConfigObjError as error:
logger.fatal('parsing the config file file with the following error: '
'{}'.format(error))
logger.fatal('if you recently updated khal, the config file format '
'might have changed, in that case please consult the '
'CHANGELOG or other documentation')
raise CannotParseConfigFileError()
fdict = {'timezone': is_timezone,
'expand_path': expand_path,
'expand_db_path': expand_db_path,
'weeknumbers': weeknumber_option,
'color': is_color,
}
validator = Validator(fdict)
results = user_config.validate(validator, preserve_errors=True)
abort = False
for section, subsection, error in flatten_errors(user_config, results):
abort = True
if isinstance(error, Exception):
logger.fatal(
'config error:\n'
'in [{}] {}: {}'.format(section[0], subsection, error))
else:
for key in error:
if isinstance(error[key], Exception):
logger.fatal('config error:\nin {} {}: {}'.format(
sectionize(section + [subsection]),
key,
str(error[key]))
)
if abort or not results:
raise InvalidSettingsError()
config_checks(user_config)
extras = get_extra_values(user_config)
for section, value in extras:
if section == ():
logger.warn('unknown section "{}" in config file'.format(value))
else:
section = sectionize(section)
logger.warn('unknown key or subsection "{}" in '
'section "{}"'.format(value, section))
return user_config
def sectionize(sections, depth=1):
"""converts list of string into [list][[of]][[[strings]]]"""
this_part = depth * '[' + sections[0] + depth * ']'
if len(sections) > 1:
return this_part + sectionize(sections[1:], depth=depth + 1)
else:
return this_part
|
Clivern/PyArchiver
|
pyarchiver/__init__.py
|
Python
|
mit
| 110
| 0.009091
|
"""
Python Compression and Archiving Library
@autho
|
r: Clivern U{hello@clivern.co
|
m}
"""
__VERSION__ = "1.0.0"
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/compiler/tests/slice_ops_test.py
|
Python
|
apache-2.0
| 5,088
| 0.004324
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slicing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SliceTest(XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.slice(i, [2], [4])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 3, 4, 5], result)
def test3D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.slice(i, [1, 2, 2], [1, 1, 4])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[6, 5, 4, 3]]], result)
class StridedSliceTest(XLATestCase):
def test1D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [2], [6], [2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([2, 4], result)
def test1DNegtiveStride(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[10])
with self.test_scope():
o = array_ops.strided_slice(i, [6], [2], [-2])
params = {
i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
}
result = o.eval(feed_dict=params)
self.assertAllEqual([6, 4], result)
def test
|
3D(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[3, 3, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [0, 2, 2], [2, 3, 6], [1, 1, 2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
|
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[1, 9]], [[6, 4]]], result)
def test3DNegativeStride(self):
for dtype in self.numeric_types:
with self.test_session():
i = array_ops.placeholder(dtype, shape=[3, 4, 10])
with self.test_scope():
o = array_ops.strided_slice(i, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
params = {
i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
[5, 3, 1, 7, 9, 2, 4, 6, 8, 0],
[4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
[[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
[4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
[8, 7, 6, 5, 4, 3, 2, 1, 8, 7],
[7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
[[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
[1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
[9, 8, 7, 9, 8, 7, 9, 8, 7, 9],
[9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
}
result = o.eval(feed_dict=params)
self.assertAllEqual([[[9, 8],
[1, 1]],
[[2, 4],
[5, 7]]], result)
if __name__ == "__main__":
googletest.main()
|
tpeek/bike_safety
|
imagersite/imager_profile/migrations/0005_auto_20150802_0303.py
|
Python
|
mit
| 1,491
| 0.001341
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_profile', '0004_auto_20150802_0153'),
]
operations = [
migrations.RemoveField(
model_name='imagerprofile',
name='name',
),
migrations.AddField(
model_name='imagerprofile',
name='nickname',
field=models.CharField(max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='address',
field=models.TextField(null=True, blank=Tru
|
e),
),
migrations.AlterField(
model_name='imagerprofile',
name='camera',
field=models.CharField(help_text=b'What i
|
s the make and model of your camera?', max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='photography_type',
field=models.CharField(blank=True, max_length=64, null=True, help_text=b'What is your photography type?', choices=[(b'H', b'Hobbist'), (b'A', b'Abstract'), (b'B', b'Black and White'), (b'P', b'Panorama'), (b'J', b'Journalism')]),
),
migrations.AlterField(
model_name='imagerprofile',
name='website_url',
field=models.URLField(null=True, blank=True),
),
]
|
chirpradio/chirpradio-machine
|
chirp/stream/do_proxy_barix_status.py
|
Python
|
apache-2.0
| 4,423
| 0.000452
|
import BaseHTTPServer
import logging
import os
import sys
import threading
import time
from chirp.common.conf import (BARIX_STATUS_HOST, BARIX_STATUS_PORT,
BARIX_HOST, BARIX_PORT)
from chirp.stream import barix
_TIMEOUT_S = 2
_POLLING_FREQUENCY_S = 5
_STATUS_PAGE = """<html><head>
<title>Barix Status</title>
<meta http-equiv=refresh content="10; url=.">
</head><body>
<h1>Barix Status</h1>
<small><i>This page will automatically update every 10 seconds.</i></small><br>
<small><i>Levels are averaged over the last %(level_avg_window_minutes)d
minutes.</i></small><br>
<br><br>
As of %(status_time)s:
<table>
<tr><td>Status</td><td>%(status)s</td></tr>
<tr><td>Left Level</td><td>%(left_level)s (avg %(left_level_avg)s)</td></tr>
<tr><td>Right Level</td><td>%(right_level)s (avg %(right_level_avg)s)</td></tr>
<tr><td>Live365?</td><td>%(live365_connected)s</td></tr>
<tr><td>Archiver?</td><td>%(archiver_connected)s</td></tr>
</table>
</body></html>
"""
# If we poll every 5s, 360 samples = 30 minutes
LEVEL_HISTORY_MAX_SIZE = 360
level_history = []
class _RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
b_obj = self.barix
NOT_CONNECTED = "<b>NOT CONNECTED</b>"
left_level_avg = 0
right_level_avg = 0
level_avg_window_minutes = 0
if level_history:
N = len(level_history)
left_level_avg = sum(L for L, _ in level_history) / N
right_level_avg = sum(R for _, R in level_history) / N
level_avg_window_minutes = N * _POLLING_FREQUENCY_S / 60
barix_info = {
"status_time": b_obj.last_update_time_str,
"status": b_obj.status,
"left_level": b_obj.left
|
_level,
"right_level": b_obj.right_level,
"left_level_avg": int(left_level_avg),
"right_level_avg": int(right_level_avg),
"level_avg_window_minutes": int(level_avg_window_minutes),
"live365_connected": NOT_CONNECTED,
"archiver_con
|
nected": NOT_CONNECTED,
}
# TODO(trow): Check IP address.
if "12345" in b_obj.clients:
barix_info["live365_connected"] = "connected"
# TODO(trow): Check IP address.
if "12346" in b_obj.clients:
barix_info["archiver_connected"] = "connected"
response_str = _STATUS_PAGE % barix_info
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(response_str)))
self.end_headers()
self.wfile.write(response_str)
def log_message(self, format, *args):
pass # Throw away log messages for now.
def handle_requests(srv, done):
while not done.isSet():
try:
srv.handle_request()
except Exception, err:
logging.exception("Swallowed exception")
def poll_barix(b_obj, log_fh):
try:
if not b_obj.ping():
return
level_history.append(
(float(b_obj.left_level), float(b_obj.right_level)))
if len(level_history) > LEVEL_HISTORY_MAX_SIZE:
level_history.pop(0)
if log_fh:
now = int(b_obj.last_update_time)
ip, far_port = b_obj.clients.get("12345", ("None", 0))
log_info = "%d %04x %s\n" % (now, int(far_port), ip)
log_fh.write(log_info)
log_fh.flush()
except Exception, err:
logging.exception("Swallowed exception")
def main():
log_path = os.path.join(os.environ["HOME"], "live365_connection.log")
log_fh = open(log_path, "a")
_RequestHandler.barix = barix.Barix(BARIX_HOST, BARIX_PORT)
srv = BaseHTTPServer.HTTPServer((BARIX_STATUS_HOST, BARIX_STATUS_PORT),
_RequestHandler)
srv.socket.settimeout(_TIMEOUT_S)
done = threading.Event()
th = threading.Thread(target=handle_requests, args=(srv, done))
th.start()
while True:
try:
poll_barix(_RequestHandler.barix, log_fh)
time.sleep(_POLLING_FREQUENCY_S)
except KeyboardInterrupt:
break
except Exception:
logging.exception("Swallowed exception")
if log_fh:
log_fh.close()
done.set()
th.join() # Wait for the serving thread to settle.
if __name__ == "__main__":
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.