Dataset columns: repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)

repo: Mortal/eggpy | path: ircutils/client.py | language: Python | license: mit | size: 13,186 | score: 0.00857

""" This module provides a direct client interface for managing an IRC
connection. If you are trying to build a bot, :class:`ircutils.bot.SimpleBot`
inherits from :class:`SimpleClient` so it has the methods listed below.
"""
import collections
from . import connection
from . import ctcp
from . import events
from . import format
from . import protocol
class SimpleClient(object):
""" SimpleClient is designed to provide a high level of abstraction
of the IRC protocol. Its methods are structured in a way that allows
you to often bypass the need to send raw IRC commands. By default,
``auto_handle`` is set to ``True`` and allows the client to handle the
following:
* Client nickname changes
* Client channel tracking
* CTCP version requests
"""
software = "http://dev.guardedcode.com/projects/ircutils/"
version = (0,1,3)
custom_listeners = {}
def __init__(self, nick, mode="+B", auto_handle=True):
self.nickname = nick
self.user = nick
self.real_name = self.software
self.filter_formatting = True
self.channels = collections.defaultdict(protocol.Channel)
self.events = events.EventDispatcher()
self._prev_nickname = None
self._mode = mode
self._register_default_listeners()
if auto_handle:
self._add_built_in_handlers()
def __getitem__(self, name):
return self.events[name]
def __setitem__(self, name, value):
self.register_listener(name, value)
def _register_default_listeners(self):
""" Registers the default listeners to the names listed in events. """
# Connection events
for name in events.connection:
self.events.register_listener(name, events.connection[name]())
# Standard events
for name in events.standard:
self.events.register_listener(name, events.standard[name]())
# Message events
for name in events.messages:
self.events.register_listener(name, events.messages[name]())
# CTCP events
for name in events.ctcp:
self.events.register_listener(name, events.ctcp[name]())
# RPL_ events
for name in events.replies:
self.events.register_listener(name, events.replies[name]())
# Custom listeners
for name in self.custom_listeners:
    self.events.register_listener(name, self.custom_listeners[name])
def _add_built_in_handlers(self):
""" Adds basic client handlers.
These handlers are bound to events that affect the data that the
client handles. It is required to have these in order to keep
track of things like client nick changes, joined channels,
and channel user lists.
"""
self.events["any"].add_handler(_update_client_info)
self.events["name_reply"].add_handler(_set_channel_names)
self.events["ctcp_version"].add_handler(_reply_to_ctcp_version)
self.events["part"].add_handler(_remove_channel_user_on_part)
self.events["quit"].add_handler(_remove_channel_user_on_quit)
self.events["join"].add_handler(_add_channel_user)
def _dispatch_event(self, prefix, command, params):
""" Given the parameters, dispatch an event.
After first building an event, this method sends the event(s) to the
primary event dispatcher.
This replaces :func:`connection.Connection.handle_line`
"""
pending_events = []
# TODO: Event parsing doesn't belong here.
if command in ["PRIVMSG", "NOTICE"]:
event = events.MessageEvent(prefix, command, params)
message_data = event.params[-1]
message_data = ctcp.low_level_dequote(message_data)
message_data, ctcp_requests = ctcp.extract(event.params[-1])
if self.filter_formatting:
message_data = format.filter(message_data)
if message_data.strip() != "":
event.message = message_data
pending_events.append(event)
for command, params in ctcp_requests:
ctcp_event = events.CTCPEvent()
ctcp_event.command = "CTCP_%s" % command
ctcp_event.params = params
ctcp_event.source = event.source
ctcp_event.target = event.target
pending_events.append(ctcp_event)
else:
pending_events.append(events.StandardEvent(prefix, command, params))
for event in pending_events:
self.events.dispatch(self, event)
def connect(self, host, port=None, channel=None, use_ssl=False,
password=None):
""" Connect to an IRC server. """
self.conn = connection.Connection()
self.conn.handle_line = self._dispatch_event
self.conn.connect(host, port, use_ssl, password)
self.conn.execute("USER", self.user, self._mode, "*",
trailing=self.real_name)
self.conn.execute("NICK", self.nickname)
self.conn.handle_connect = self._handle_connect
self.conn.handle_close = self._handle_disconnect
if channel is not None:
# Builds a handler on-the-fly for joining init channels
if isinstance(channel, basestring):
channels = [channel]
else:
channels = channel
def _auto_joiner(client, event):
for channel in channels:
client.join_channel(channel)
self.events["welcome"].add_handler(_auto_joiner)
def is_connected(self):
return self.conn.connected
def _handle_connect(self):
connection.Connection.handle_connect(self.conn)
event = events.ConnectionEvent("CONN_CONNECT")
self.events.dispatch(self, event)
def _handle_disconnect(self):
connection.Connection.handle_close(self.conn)
event = events.ConnectionEvent("CONN_DISCONNECT")
self.events.dispatch(self, event)
def register_listener(self, event_name, listener):
""" Registers an event listener for a given event name.
In essence, this binds the event name to the listener and simply
provides an easier way to reference the listener.
::
client.register_listener("event_name", MyListener())
"""
self.events.register_listener(event_name, listener)
def identify(self, ns_password):
""" Identify yourself with the NickServ service on IRC.
This assumes that NickServ is present on the server.
"""
self.send_message("NickServ", "IDENTIFY {0}".format(ns_password))
def join_channel(self, channel, key=None):
""" Join the specified channel. Optionally, provide a key to the channel
if it requires one.
::
client.join_channel("#channel_name")
client.join_channel("#channel_name", "channelkeyhere")
"""
if channel == "0":
self.channels = []
self.conn.execute("JOIN", "0")
else:
if key is not None:
params = [channel, key]
else:
params = [channel]
self.conn.execute("JOIN", *params)
def part_channel(self, channel, message=None):
""" Leave the specified channel.
You may provide a message that shows up during departure.
"""
self.conn.execute("PART", channel, trailing=message)
def send_message(self, target, message, to_service=False):
""" Sends a message to the specified target.
If it is a service, it uses SQUERY instead.
"""
message = ctcp.low_level_quote(message)
if to_service:
self.conn.execute("SQUERY", target, message)
else:
self.conn.execute("PRIVMSG", target, trailing=message)
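
A minimal usage sketch for the client above, built only from the methods shown here; the listener name "channel_message" is an assumption (the actual names live in the events module), as are the server and channel.

# Hedged sketch; "channel_message" is an assumed listener name.
from ircutils import client

def echo(cli, event):
    # event.target and event.message are filled in by _dispatch_event above
    cli.send_message(event.target, event.message)

bot = client.SimpleClient(nick="demo")
bot["channel_message"].add_handler(echo)
bot.connect("irc.example.org", port=6667, channel="#test")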

repo: CQTools/analogIO-miniUSB | path: analogIO.py | language: Python | license: mit | size: 2,908 | score: 0.013755

# -*- coding: utf-8 -*-
"""
Created on Fri Sep 5 14:44:52 2014
@author: nick
Pyserial interface for communicating with mini usb Analog IO board
Usage: Send plaintext commands, separated by newline/cr or semicolon.
An eventual reply comes terminated with cr+lf.
Important commands:
*IDN? Returns device identifier
*RST Resets device, outputs are 0V.
OUT <channel> <value>
Sets <channel> (ranging from 0 to 2) to
the voltage <value>. Use 2.5 as value, not 2.5E0
IN? <channel>
Returns voltage of input <channel> (ranging from 0 to 3).
ALLIN? Returns all voltages
HELP Print this help text.
ON /OFF Switches the analog unit on/off.
DIGOUT <value>
Sets the digital outputs to the
binary value (ranging from 0..3).
REMARK:
Output ranges from 0V to 4.095V. Input is capacitive and ranges
from 0V to 4.095V.
"""
import serial
class Anlogcomm(object):
    # Module for communicating with the mini usb IO board
baudrate = 115200
def __init__(self, port):
self.serial = self._open_port(port)
self._serial_write('a')# flush io buffer
print self._serial_read() #will read unknown command
def _open_port(self, port):
ser = serial.Serial(port, timeout=5)
#ser.readline()
#ser.timeout = 1 #causes problem with nexus 7
return ser
def _serial_write(self, string):
self.serial.write(string + '\n')
def _serial_read(self):
msg_string = self.serial.readline()
# Remove any linefeeds etc
msg_string = msg_string.rstrip()
return msg_string
def reset(self):
self._serial_write('*RST')
return self._serial_read()
def get_voltage(self,channel):
self._serial_write('IN? ' + str(channel))
voltage = self._serial_read()
return voltage
def get_voltage_all(self):
self._serial_write('ALLIN?')
allin = self._serial_read()
return allin
def set_voltage(self,channel,value):
self._serial_write('OUT ' + str(channel) + ' ' + str(value))
return
def set_digitout(self,value):
self._serial_write('DIGOUT ' + str(value))
return
def serial_number(self):
self._serial_write('*IDN?')
return self._serial_read()
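
A short usage sketch of the class above, following the command set in the module docstring; the serial port name is a hypothetical example and depends on the host OS.

board = Anlogcomm('/dev/ttyUSB0')  # hypothetical port name
print board.serial_number()        # *IDN? -> device identifier
board.set_voltage(1, 2.5)          # OUT 1 2.5 -> set channel 1 to 2.5 V
print board.get_voltage(0)         # IN? 0 -> read input channel 0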

repo: eepp/lttngc | path: lttngc/about_dialog.py | language: Python | license: mit | size: 763 | score: 0

from lttngc import __version__
from lttngc import utils
from PyQt4 import Qt
import os.path
class QLttngcAboutDialog(utils.QCommonDialog, utils.QtUiLoad):
_UI_NAME = 'about'
def __init__(self):
super().__init__()
self._setup_ui()
def _set_version(self):
self.version_label.setText('v{}'.format(__version__))
def _set_contents(self):
self._set_version()
def _set_logo(self):
path = utils.get_res_path(os.path.join('logo', 'logo-80.png'))
pixmap = Qt.QPixmap(path)
self._logo_lbl.setPixmap(pixmap)
self._logo_lbl.setMask(pixmap.mask())
self._logo_lbl.setText('')
def _setup_ui(self):
self._load_ui()
self._set_contents()
self._set_logo()

repo: JVMartin/grammar-analyzer | path: tests.py | language: Python | license: mit | size: 7,593 | score: 0.018833

#!/usr/bin/env python3
"""
Unit tests for the Grammar class and for the GrammarAnalyzer class.
Tests each grammar in the "grammars" folder against a variety of strings.
"""
import unittest
from grammar import Grammar
from grammaranalyzer import GrammarAnalyzer
class TestGrammar(unittest.TestCase):
def test_nonexistent_file(self):
# Ensure no exceptions are thrown.
grammar = Grammar("nonexistent.json")
self.assertEqual(grammar.get_desc(), "")
def test_grammar_load(self):
grammar = Grammar("grammars/grammar1.json")
self.assertEqual(grammar.get_desc(), "{a^n # b^n | n > 0}")
def test_grammar_productions(self):
grammar = Grammar("grammars/grammar1.json")
# Check start variable productions.
rules = grammar.produces("S")
self.assertEqual(rules, ["aAb"])
rules = grammar.produces("A")
self.assertEqual(rules, ["aAb", "#"])
# Check nonexistent variable productions.
rules = grammar.produces("N")
self.assertFalse(rules)
def test_grammar_rules(self):
grammar = Grammar("grammars/grammar1.json")
# Check that the correct rules are returned.
rule = grammar.get_rule("S", "a")
self.assertEqual(rule, "aAb")
rule = grammar.get_rule("A", "#")
self.assertEqual(rule, "#")
# Check nonexistent input symbol.
rule = grammar.get_rule("S", "k")
self.assertFalse(rule)
# Check nonexistent variable.
rule = grammar.get_rule("N", "a")
self.assertFalse(rule)
class TestGrammarAnalyzer(unittest.TestCase):
def test_grammar1(self):
grammar = Grammar("grammars/grammar1.json")
grammar_analyzer = GrammarAnalyzer(grammar)
# Check accepted strings.
self.assertTrue(grammar_analyzer.test_string("a#b"))
self.assertTrue(grammar_analyzer.test_string("aa#bb"))
self.assertTrue(grammar_analyzer.test_string("aaa#bbb"))
self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb"))
self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb"))
self.assertTrue(grammar_analyzer.test_string("aaaaaa#bbbbbb"))
self.assertTrue(grammar_analyzer.test_string("aaaaaaa#bbbbbbb"))
self.assertTrue(grammar_analyzer.test_string("aaaaaaaa#bbbbbbbb"))
self.assertTrue(grammar_analyzer.test_string("aaaaaaaaa#bbbbbbbbb"))
self.assertTrue(grammar_analyzer.test_string("aaaaaaaaaa#bbbbbbbbbb"))
# Check rejected strings.
self.assertFalse(grammar_analyzer.test_string("xxx"))
self.assertFalse(grammar_analyzer.test_string(""))
self.assertFalse(grammar_analyzer.test_string("#"))
self.assertFalse(grammar_analyzer.test_string("a"))
self.assertFalse(grammar_analyzer.test_string("aa#b"))
self.assertFalse(grammar_analyzer.test_string("a#bb"))
self.assertFalse(grammar_analyzer.test_string("asdf"))
self.assertFalse(grammar_analyzer.test_string("aaaa#bbbbbb"))
def test_grammar2(self):
grammar = Grammar("grammars/grammar2.json")
grammar_analyzer = GrammarAnalyzer(grammar)
# Check accepted strings.
self.assertTrue(grammar_analyzer.test_string("#"))
self.assertTrue(grammar_analyzer.test_string("0#0"))
self.assertTrue(grammar_analyzer.test_string("1#1"))
self.assertTrue(grammar_analyzer.test_string("01#10"))
self.assertTrue(grammar_analyzer.test_string("10#01"))
self.assertTrue(grammar_analyzer.test_string("010#010"))
self.assertTrue(grammar_analyzer.test_string("1111#1111"))
self.assertTrue(grammar_analyzer.test_string("010001#100010"))
self.assertTrue(grammar_analyzer.test_string("0100011#1100010"))
self.assertTrue(grammar_analyzer.test_string("01000101#10100010"))
# Check rejected strings.
self.assertFalse(grammar_analyzer.test_string("xxx"))
self.assertFalse(grammar_analyzer.test_string(""))
self.assertFalse(grammar_analyzer.test_string("0"))
self.assertFalse(grammar_analyzer.test_string("0#1"))
self.assertFalse(grammar_analyzer.test_string("1#10"))
self.assertFalse(grammar_analyzer.test_string("01#01"))
self.assertFalse(grammar_analyzer.test_string("11#111"))
self.assertFalse(grammar_analyzer.test_string("111#11"))
self.assertFalse(grammar_analyzer.test_string("111#110"))
self.assertFalse(grammar_analyzer.test_string("0111#110"))
def test_grammar3(self):
grammar = Grammar("grammars/grammar3.json")
grammar_analyzer = GrammarAnalyzer(grammar)
# Check accepted strings.
self.assertTrue(grammar_analyzer.test_string("a#b#c#"))
self.assertTrue(grammar_analyzer.test_string("a#b#cc#"))
self.assertTrue(grammar_analyzer.test_string("a#b#ccc#"))
self.assertTrue(grammar_analyzer.test_string("a#b#cccc#"))
self.assertTrue(grammar_analyzer.test_string("a#b#ccccc#"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#c#"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#cc#"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#ccc#"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#cccc#"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#ccccc#"))
self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb#c#"))
self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#c#"))
self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#cc#"))
self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#ccc#"))
self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#cccc#"))
self.assertTrue(grammar_analyzer.test_string("aaaaa#bbbbb#ccccc#"))
# Check rejected strings.
self.assertFalse(grammar_analyzer.test_string("xxx"))
self.assertFalse(grammar_analyzer.test_string(""))
self.assertFalse(grammar_analyzer.test_string("a"))
self.assertFalse(grammar_analyzer.test_string("a#b"))
self.assertFalse(grammar_analyzer.test_string("a#b#c"))
self.assertFalse(grammar_analyzer.test_string("####"))
self.assertFalse(grammar_analyzer.test_string("abcd"))
self.assertFalse(grammar_analyzer.test_string("aaaaa#bbb#c#"))
self.assertFalse(grammar_analyzer.test_string("aaaaa##ccccc#"))
self.assertFalse(grammar_analyzer.test_string("aaaa##ccccc#"))
self.assertFalse(grammar_analyzer.test_string("aaa##ccccc#"))
def test_grammar4(self):
grammar = Grammar("grammars/grammar4.json")
grammar_analyzer = GrammarAnalyzer(grammar)
# Check accepted strings.
self.assertTrue(grammar_analyzer.test_string("a#b#c#d"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#c#d"))
self.assertTrue(grammar_analyzer.test_string("a#b#cc#dd"))
self.assertTrue(grammar_analyzer.test_string("aaa#bbb#c#d"))
self.assertTrue(grammar_analyzer.test_string("a#b#ccc#ddd"))
self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb#c#d"))
self.assertTrue(grammar_analyzer.test_string("a#b#cccc#dddd"))
self.assertTrue(grammar_analyzer.test_string("aa#bb#cccc#dddd"))
self.assertTrue(grammar_analyzer.test_string("aaa#bbb#cccc#dddd"))
self.assertTrue(grammar_analyzer.test_string("aaaa#bbbb#ccccc#ddddd"))
self.assertTrue(grammar_analyzer.test_string("a#b#cccccc#dddddd"))
self.assertTrue(grammar_analyzer.test_string("aaaaaaa#bbbbbbb#c#d"))
# Check rejected strings.
self.assertFalse(grammar_analyzer.test_string("xxx"))
self.assertFalse(grammar_analyzer.test_string(""))
self.assertFalse(grammar_analyzer.test_string("#"))
self.assertFalse(grammar_analyzer.test_string("a#b#c#"))
self.assertFalse(grammar_analyzer.test_string("#b#c#d"))
self.assertFalse(grammar_analyzer.test_string("a#bb#c#d"))
self.assertFalse(grammar_analyzer.test_string("a#b#c#dd"))
self.assertFalse(grammar_analyzer.test_string("a#bb#c#dd"))
self.assertFalse(grammar_analyzer.test_string("aa#bb#cc#dd#"))
self.assertFalse(grammar_analyzer.test_string("aaa#bbb#ccc#dddd"))
self.assertFalse(grammar_analyzer.test_string("aaa#bbb#ccc#dddd##"))

repo: ppasq/geonode | path: geonode/base/forms.py | language: Python | license: gpl-3.0 | size: 17,648 | score: 0.000793

# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from fields import MultiThesauriField
from widgets import MultiThesauriWidget
from autocomplete_light.widgets import ChoiceWidget
from autocomplete_light.contrib.taggit_field import TaggitField, TaggitWidget
from django import forms
from django.core import validators
from django.forms import models
from django.forms.fields import ChoiceField
from django.forms.utils import flatatt
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.utils.encoding import (
force_text,
)
from bootstrap3_datetime.widgets import DateTimePicker
from modeltranslation.forms import TranslationModelForm
from geonode.base.models import HierarchicalKeyword, TopicCategory, Region, License
from geonode.people.models import Profile
from geonode.base.enumerations import ALL_LANGUAGES
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
def get_tree_data():
def rectree(parent, path):
children_list_of_tuples = list()
c = Region.objects.filter(parent=parent)
for child in c:
children_list_of_tuples.append(
tuple((path + parent.name, tuple((child.id, child.name))))
)
childrens = rectree(child, parent.name + '/')
if childrens:
children_list_of_tuples.extend(childrens)
return children_list_of_tuples
data = list()
try:
t = Region.objects.filter(Q(level=0) | Q(parent=None))
for toplevel in t:
data.append(
tuple((toplevel.id, toplevel.name))
)
childrens = rectree(toplevel, '')
if childrens:
data.append(
tuple((toplevel.name, childrens))
)
except BaseException:
pass
return tuple(data)
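# Shape of the returned choices (illustrative, with assumed Region rows):
#   ((1, 'Africa'),
#    ('Africa', [('Africa', (10, 'Eastern Africa')),
#                ('Africa/Eastern Africa', (100, 'Kenya'))]))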
class AdvancedModelChoiceIterator(models.ModelChoiceIterator):
def choice(self, obj):
return (
self.field.prepare_value(obj),
self.field.label_from_instance(obj),
obj)
class CategoryChoiceField(forms.ModelChoiceField):
def _get_choices(self):
if hasattr(self, '_choices'):
return self._choices
return AdvancedModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def label_from_instance(self, obj):
return '<i class="fa ' + obj.fa_class + ' fa-2x unchecked"></i>' \
'<i class="fa ' + obj.fa_class + ' fa-2x checked"></i>' \
'<span class="has-popover" data-container="body" data-toggle="popover" data-placement="top" ' \
'data-content="' + obj.description + '" trigger="hover">' \
'<br/><strong>' + obj.gn_description + '</strong></span>'
class TreeWidget(TaggitWidget):
input_type = 'text'
def render(self, name, value, attrs=None):
if isinstance(value, basestring):
vals = value
elif value:
vals = ','.join([i.tag.name for i in value])
else:
vals = ""
output = ["""<div class="keywords-container"><span class="input-group">
<input class="form-control"
id="id_resource-keywords"
name="resource-keywords"
value="%s"><br/>""" % (vals)]
output.append(
'<div id="treeview" class="" style="display: none"></div>')
output.append(
'<span class="input-group-addon" id="treeview-toggle"><i class="fa fa-folder"></i></span>')
output.append('</span></div>')
return mark_safe(u'\n'.join(output))
class RegionsMultipleChoiceField(forms.MultipleChoiceField):
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise forms.ValidationError(
self.error_messages['required'], code='required')
class RegionsSelect(forms.Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None):
if value is None:
value = []
final_attrs = self.build_attrs(attrs)
final_attrs["name"] = name
output = [
format_html(
'<select multiple="multiple"{}>',
flatatt(final_attrs))]
options = self.render_options(value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def render_option_value(
self,
selected_choices,
option_value,
option_label,
data_section=None):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
label = force_text(option_label)
if data_section is None:
data_section = ''
else:
data_section = force_text(data_section)
if '/' in data_section:
label = format_html(
'{} [{}]', label, data_section.rsplit(
'/', 1)[1])
return format_html(
'<option data-section="{}" value="{}"{}>{}</option>',
data_section,
option_value,
selected_html,
label)
def render_options(self, selected_choices):
# Normalize to strings.
def _region_id_from_choice(choice):
if isinstance(choice, int):
return choice
else:
return choice.id
selected_choices = set(force_text(_region_id_from_choice(v)) for v in selected_choices)
output = []
output.append(format_html('<optgroup label="{}">', 'Global'))
for option_value, option_label in self.choices:
if not isinstance(
option_label, (list, tuple)) and isinstance(
option_label, basestring):
output.append(
self.render_option_value(
selected_choices,
option_value,
option_label))
output.append('</optgroup>')
for option_value, option_label in self.choices:
if isinstance(
option_label, (list, tuple)) and not isinstance(
option_label, basestring):
output.append(
format_html(
'<optgroup label="{}">',
force_text(option_value)))
for option in option_label:
if isinstance(
option, (list, tuple)) and not isinstance(
option, basestring):

repo: edmorley/treeherder | path: tests/selenium/test_switch_app.py | language: Python | license: mpl-2.0 | size: 790 | score: 0

from pages.treeherder import Treeherder
def test_switch_app(base_url, selenium, test_repository):
"""Switch between Treeherder and Perfherder using header dropdown"""
page = Treeherder(selenium, base_url).open()
assert page.header.active_app == 'Treeherder'
page = page.switch_to_perfherder()
assert page.header.active_app == 'Perfherder'
page = page.switch_to_treeherder()
# Be aware that when switching back from Perfherder, it will try to
# default to mozilla-inbound, which does not exist in this test scenario.
# So part of this test is to check what happens when the ``repo`` param
# in the url is an invalid repo. We should still display the nav bars
# and a meaningful error message.
assert page.header.active_app == 'Treeherder'

repo: xbmc/atv2 | path: xbmc/lib/libPython/Python/Lib/bsddb/test/test_associate.py | language: Python | license: gpl-2.0 | size: 11,198 | score: 0.009109

"""
TestCases for DB.associate.
"""
import sys, os, string
import tempfile
import time
from pprint import pprint
try:
from threading import Thread, currentThread
have_threads = 1
except ImportError:
have_threads = 0
import unittest
from test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
except ImportError:
# For Python 2.3
from bsddb import db, dbshelve
#----------------------------------------------------------------------
musicdata = {
1 : ("Bad English", "The Price Of Love", "Rock"),
2 : ("DNA featuring Suzanne Vega", "Tom's Diner", "Rock"),
3 : ("George Michael", "Praying For Time", "Rock"),
4 : ("Gloria Estefan", "Here We Are", "Rock"),
5 : ("Linda Ronstadt", "Don't Know Much", "Rock"),
6 : ("Michael Bolton", "How Am I Supposed To Live Without You", "Blues"),
7 : ("Paul Young", "Oh Girl", "Rock"),
8 : ("Paula Abdul", "Opposites Attract", "Rock"),
9 : ("Richard Marx", "Should've Known Better", "Rock"),
10: ("Rod Stewart", "Forever Young", "Rock"),
11: ("Roxette", "Dangerous", "Rock"),
12: ("Sheena Easton", "The Lover In Me", "Rock"),
13: ("Sinead O'Connor", "Nothing Compares 2 U", "Rock"),
14: ("Stevie B.", "Because I Love You", "Rock"),
15: ("Taylor Dayne", "Love Will Lead You Back", "Rock"),
16: ("The Bangles", "Eternal Flame", "Rock"),
17: ("Wilson Phillips", "Release Me", "Rock"),
18: ("Billy Joel", "Blonde Over Blue", "Rock"),
19: ("Billy Joel", "Famous Last Words", "Rock"),
20: ("Billy Joel", "Lullabye (Goodnight, My Angel)", "Rock"),
21: ("Billy Joel", "The River Of Dreams", "Rock"),
22: ("Billy Joel", "Two Thousand Years", "Rock"),
23: ("Janet Jackson", "Alright", "Rock"),
24: ("Janet Jackson", "Black Cat", "Rock"),
25: ("Janet Jackson", "Come Back To Me", "Rock"),
26: ("Janet Jackson", "Escapade", "Rock"),
27: ("Janet Jackson", "Love Will Never Do (Without You)", "Rock"),
28: ("Janet Jackson", "Miss You Much", "Rock"),
29: ("Janet Jackson", "Rhythm Nation", "Rock"),
30: ("Janet Jackson", "State Of The World", "Rock"),
31: ("Janet Jackson", "The Knowledge", "Rock"),
32: ("Spyro Gyra", "End of Romanticism", "Jazz"),
33: ("Spyro Gyra", "Heliopolis", "Jazz"),
34: ("Spyro Gyra", "Jubilee", "Jazz"),
35: ("Spyro Gyra", "Little Linda", "Jazz"),
36: ("Spyro Gyra", "Morning Dance", "Jazz"),
37: ("Spyro Gyra", "Song for Lorraine", "Jazz"),
38: ("Yes", "Owner Of A Lonely Heart", "Rock"),
39: ("Yes", "Rhythm Of Love", "Rock"),
40: ("Cusco", "Dream Catcher", "New Age"),
41: ("Cusco", "Geronimos Laughter", "New Age"),
42: ("Cusco", "Ghost Dance", "New Age"),
43: ("Blue Man Group", "Drumbone", "New Age"),
44: ("Blue Man Group", "Endless Column", "New Age"),
45: ("Blue Man Group", "Klein Mandelbrot", "New Age"),
46: ("Kenny G", "Silhouette", "Jazz"),
47: ("Sade", "Smooth Operator", "Jazz"),
48: ("David Arkenstone", "Papillon (On The Wings Of The Butterfly)",
"New Age"),
49: ("David Arkenstone", "Stepping Stars", "New Age"),
50: ("David Arkenstone", "Carnation Lily Lily Rose", "New Age"),
51: ("David Lanz", "Behind The Waterfall", "New Age"),
52: ("David Lanz", "Cristofori's Dream", "New Age"),
53: ("David Lanz", "Heartsounds", "New Age"),
54: ("David Lanz", "Leaves on the Seine", "New Age"),
99: ("unknown artist", "Unnamed song", "Unknown"),
}
#----------------------------------------------------------------------
class AssociateTestCase(unittest.TestCase):
keytype = ''
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
self.env = db.DBEnv()
self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_THREAD)
def tearDown(self):
self.closeDB()
self.env.close()
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
def addDataToDB(self, d):
for key, value in musicdata.items():
if type(self.keytype) == type(''):
key = "%02d" % key
d.put(key, string.join(value, '|'))
def createDB(self):
self.primary = db.DB(self.env)
self.primary.set_get_returns_none(2)
self.primary.open(self.filename, "primary", self.dbtype,
db.DB_CREATE | db.DB_THREAD)
def closeDB(self):
self.primary.close()
def getDB(self):
return self.primary
def test01_associateWithDB(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_associateWithDB..." % \
self.__class__.__name__
self.createDB()
secDB = db.DB(self.env)
secDB.set_flags(db.DB_DUP)
secDB.set_get_returns_none(2)
secDB.open(self.filename, "secondary", db.DB_BTREE,
db.DB_CREATE | db.DB_THREAD)
self.getDB().associate(secDB, self.getGenre)
self.addDataToDB(self.getDB())
self.finish_test(secDB)
def test02_associateAfterDB(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_associateAfterDB..." % \
self.__class__.__name__
self.createDB()
self.addDataToDB(self.getDB())
secDB = db.DB(self.env)
secDB.set_flags(db.DB_DUP)
secDB.open(self.filename, "secondary", db.DB_BTREE,
db.DB_CREATE | db.DB_THREAD)
# adding the DB_CREATE flag will cause it to index existing records
self.getDB().associate(secDB, self.getGenre, db.DB_CREATE)
self.finish_test(secDB)
def finish_test(self, secDB):
# 'Blues' should not be in the secondary database
vals = secDB.pget('Blues')
assert vals == None, vals
vals = secDB.pget('Unknown')
assert vals[0] == 99 or vals[0] == '99', vals
vals[1].index('Unknown')
vals[1].index('Unnamed')
vals[1].index('unknown')
if verbose:
print "Primary key traversal:"
c = self.getDB().cursor()
count = 0
rec = c.first()
while rec is not None:
if type(self.keytype) == type(''):
assert string.atoi(rec[0]) # for primary db, key is a number
else:
assert rec[0] and type(rec[0]) == type(0)
count = count + 1
if verbose:
print rec
rec = c.next()
assert count == len(musicdata) # all items accounted for
if verbose:
print "Secondary key traversal:"
c = secDB.cursor()
count = 0
# test cursor pget
vals = c.pget('Unknown', flags=db.DB_LAST)
assert vals[1] == 99 or vals[1] == '99', vals
assert vals[0] == 'Unknown'
vals[2].index('Unknown')
vals[2].index('Unnamed')
vals[2].index('unknown')
vals = c.pget('Unknown', data='wrong value', flags=db.DB_GET_BOTH)
assert vals == None, vals
rec = c.first()
assert rec[0] == "Jazz"
while rec is not None:
count = count + 1
if verbose:
print rec
rec = c.next()
# all items accounted for EXCEPT for 1 with "Blues" genre
assert count == len(musicdata)-1
def getGenre(self, priKey, priData):
assert type(priData) == type("")
if verbose:
print 'getGenre key: %r data: %r' % (priKey, priData)
genre = string.split(priData, '|')[2]
if genre == 'Blues':
return db.DB_DONOTINDEX
else:
return genre
#----------------------------------------------------------------------
class AssociateHashTestCase(AssociateTestCase):
dbtype = db.DB_HASH
class AssociateBTreeTestCase(AssociateTestCase):
dbtype = db.DB_BTREE
class AssociateRecnoTestCase(AssociateTestCase):
dbtype = db.DB_RECNO
keytype = 0
#----------------------------------------------------------------------
class ShelveAssociateTestCase(AssociateTestCase):

repo: arthurpro/HopperPlugins | path: tools/dumpbinary.py | language: Python | license: bsd-2-clause | size: 5,539 | score: 0

#!/usr/bin/env python
# Copyright (c) 2014, Alessandro Gatti - frob.it
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import sys
DESCRIPTION = 'Dumps given data as a list of binary values of arbitrary length'
NAME = 'dumpbinary'
LITTLE_ENDIAN = 0
BIG_ENDIAN = 1
BOTH_ENDIAN = 2
def print_big_endian(strip_spaces, *data):
output = ''
for byte in data:
fragment = '{:08b} '.format(byte)
if strip_spaces:
output += fragment.strip()
else:
output += fragment
return output.strip()
def print_little_endian(strip_spaces, *data):
output = ''
for byte in reversed(data):
fragment = '{:08b} '.format(byte)
if strip_spaces:
output += fragment.strip()
else:
output += fragment
return output.strip()
def print_line(strip_spaces, offset, endian, *data):
    # Prefix each output line with the byte offset in hex.
    output = '{:08X}: '.format(offset)
    if endian == BIG_ENDIAN:
        print(output + print_big_endian(strip_spaces, *data))
    elif endian == LITTLE_ENDIAN:
        print(output + print_little_endian(strip_spaces, *data))
    elif endian == BOTH_ENDIAN:
        print('%s%s | %s' % (output, print_big_endian(strip_spaces, *data),
                             print_little_endian(strip_spaces, *data)))
def dump_byte(input_file):
offset = 0
while True:
byte = input_file.read(1)
if len(byte) == 0:
break
# Endianness is irrelevant for a single byte; pass BIG_ENDIAN.
print_line(False, offset, BIG_ENDIAN, ord(byte))
offset += 1
def dump_word(input_file, endian, strip_spaces):
offset = 0
while True:
word = input_file.read(2)
if len(word) == 0:
break
elif len(word) == 1:
raise Exception('Unaligned data')
else:
print_line(strip_spaces, offset, endian, ord(word[0]),
ord(word[1]))
offset += 2
def dump_dword(input_file, endian, strip_spaces):
offset = 0
while True:
dword = input_file.read(4)
if len(dword) == 0:
break
elif len(dword) != 4:
raise Exception('Unaligned data')
else:
print_line(strip_spaces, offset, endian, ord(dword[0]),
ord(dword[1]), ord(dword[2]), ord(dword[3]))
offset += 4
def dump(input_file, length, endian, strip_spaces):
if length == 1:
dump_byte(input_file)
elif length == 2:
dump_word(input_file, endian, strip_spaces)
elif length == 4:
dump_dword(input_file, endian, strip_spaces)
else:
pass
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=NAME, description=DESCRIPTION)
endianness = parser.add_mutually_exclusive_group(required=True)
endianness.add_argument('--endian', metavar='endian',
choices=('little', 'big', 'l', 'b'),
help='endianness of the input data')
endianness.add_argument('-l', '--little',
help='shortcut for --endian little',
action='store_true')
endianness.add_argument('-b', '--big', help='shortcut for --endian big',
action='store_true')
endianness.add_argument('--both', action='store_true',
help='show both endianness data side by side')
parser.add_argument('-c', '--compact', action='store_true',
help='do not print spaces between bytes')
parser.add_argument('length', metavar='length', type=int,
choices=(1, 2, 4),
help='length in bytes of the binary values')
parser.add_argument('infile', metavar='input_file', nargs='?',
type=argparse.FileType('r'), default=sys.stdin,
help='the file to read from, or STDIN')
parser.add_argument('--version', action='version', version='0.0.1')
arguments = parser.parse_args()
endian = None
if arguments.both:
endian = BOTH_ENDIAN
elif arguments.big or arguments.endian in ('big', 'b'):
endian = BIG_ENDIAN
else:
endian = LITTLE_ENDIAN
sys.exit(dump(arguments.infile, arguments.length, endian,
arguments.compact))
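
A quick sketch of driving dump() directly instead of through the argparse CLI; the input file name is a hypothetical placeholder.

with open('firmware.bin', 'rb') as f:  # hypothetical input file
    dump(f, 2, BIG_ENDIAN, False)      # 16-bit words, big-endian, spaced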

repo: arunkgupta/gramps | path: gramps/plugins/textreport/summary.py | language: Python | license: gpl-2.0 | size: 10,795 | score: 0.004354

#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Text Reports/Database Summary Report.
"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import posixpath
from gramps.gen.ggettext import gettext as _
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.lib import Person
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.utils.file import media_path_full
from gramps.gen.datehandler import get_date
#------------------------------------------------------------------------
#
# SummaryReport
#
#------------------------------------------------------------------------
class SummaryReport(Report):
"""
This report produces a summary of the objects in the database.
"""
def __init__(self, database, options, user):
"""
Create the SummaryReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
self.__db = database
def write_report(self):
"""
Overridden function to generate the report.
"""
self.doc.start_paragraph("SR-Title")
title = _("Database Summary Report")
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
self.summarize_people()
self.summarize_families()
self.summarize_media()
def summarize_people(self):
"""
Write a summary of all the people in the database.
"""
with_media = 0
incomp_names = 0
disconnected = 0
missing_bday = 0
males = 0
females = 0
unknowns = 0
namelist = []
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Individuals"))
self.doc.end_paragraph()
num_people = 0
for person in self.__db.iter_people():
num_people += 1
# Count people with media.
length = len(person.get_media_list())
if length > 0:
with_media += 1
# Count people with incomplete names.
for name in [person.get_primary_name()] + person.get_alternate_names():
if name.get_first_name().strip() == "":
incomp_names += 1
else:
if name.get_surname_list():
for surname in name.get_surname_list():
if surname.get_surname().strip() == "":
incomp_names += 1
else:
incomp_names += 1
# Count people without families.
if (not person.get_main_parents_family_handle() and
not len(person.get_family_handle_list())):
disconnected += 1
# Count missing birthdays.
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.__db.get_event_from_handle(birth_ref.ref)
if not get_date(birth):
missing_bday += 1
else:
missing_bday += 1
# Count genders.
if person.get_gender() == Person.FEMALE:
females += 1
elif person.get_gender() == Person.MALE:
males += 1
else:
unknowns += 1
# Count unique surnames
if name.get_surname() not in namelist:
namelist.append(name.get_surname())
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of individuals: %d") % num_people)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Males: %d") % males)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Females: %d") % females)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals with unknown gender: %d") % unknowns)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Incomplete names: %d") %
incomp_names)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals missing birth dates: %d") %
missing_bday)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Disconnected individuals: %d") % disconnected)
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Unique surnames: %d") % len(namelist))
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Individuals with media objects: %d") %
with_media)
self.doc.end_paragraph()
def summarize_families(self):
"""
Write a summary of all the families in the database.
"""
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Family Information"))
self.doc.end_paragraph()
self.doc.start_paragraph("SR-Normal")
self.doc.write_text(_("Number of families: %d") % self.__db.get_number_of_families())
self.doc.end_paragraph()
def summarize_media(self):
"""
Write a summary of all the media in the database.
"""
total_media = 0
size_in_bytes = 0
notfound = []
self.doc.start_paragraph("SR-Heading")
self.doc.write_text(_("Media Objects"))
self.doc.end_paragraph()
total_media = len(self.__db.get_media_object_handles())
mbytes = "0"
for media_id in self.__db.get_media_object_handles():
media = self.__db.get_object_from_handle(media_id)
try:
size_in_bytes += posixpath.getsize(
media_path_full(self.__db, media.get_path()))
length = len(str(size_in_bytes))
if size_in_bytes <= 999999:
mbytes = _("less than 1")
else:
mbytes = str(size_in_bytes)[:(length-6)]
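# e.g. 12,345,678 bytes -> '12': megabytes obtained by string slicing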
except:
notfound.append(media.get_path())
self.

repo: andnovar/kivy | path: kivy/core/text/__init__.py | language: Python | license: mit | size: 28,172 | score: 0.000461

'''
Text
====
An abstraction of text creation. Depending on the selected backend, the
accuracy of text rendering may vary.
.. versionchanged:: 1.5.0
:attr:`LabelBase.line_height` added.
.. versionchanged:: 1.0.7
The :class:`LabelBase` does not generate any texture if the text has a
width <= 1.
This is the backend layer for getting text out of different text providers;
you should only be using this directly if your needs aren't fulfilled by the
:class:`~kivy.uix.label.Label`.
Usage example::
from kivy.core.text import Label as CoreLabel
...
...
my_label = CoreLabel()
my_label.text = 'hello'
# the label is usually not drawn until needed, so force it to draw.
my_label.refresh()
# Now access the texture of the label and use it wherever and
# however you may please.
hello_texture = my_label.texture
'''
__all__ = ('LabelBase', 'Label')
import re
import os
from functools import partial
from copy import copy
from kivy import kivy_data_dir
from kivy.utils import platform
from kivy.graphics.texture import Texture
from kivy.core import core_select_lib
from kivy.core.text.text_layout import layout_text, LayoutWord
from kivy.resources import resource_find, resource_add_path
from kivy.compat import PY2
from kivy.setupconfig import USE_SDL2
DEFAULT_FONT = 'Roboto'
FONT_REGULAR = 0
FONT_ITALIC = 1
FONT_BOLD = 2
FONT_BOLDITALIC = 3
whitespace_pat = re.compile('( +)')
class LabelBase(object):
'''Core text label.
This is the abstract class used by different backends to render text.
.. warning::
The core text label can't be changed at runtime. You must recreate one.
:Parameters:
`font_size`: int, defaults to 12
Font size of the text
`font_name`: str, defaults to DEFAULT_FONT
Font name of the text
`bold`: bool, defaults to False
Activate "bold" text style
`italic`: bool, defaults to False
Activate "italic" text style
`text_size`: tuple, defaults to (None, None)
Add constraint to render the text (inside a bounding box).
If no size is given, the label size will be set to the text size.
`padding`: float, defaults to None
If it's a float, it will set padding_x and padding_y
`padding_x`: float, defaults to 0.0
Left/right padding
`padding_y`: float, defaults to 0.0
Top/bottom padding
`halign`: str, defaults to "left"
Horizontal text alignment inside the bounding box
`valign`: str, defaults to "bottom"
Vertical text alignment inside the bounding box
`shorten`: bool, defaults to False
Indicate whether the label should attempt to shorten its textual
contents as much as possible if a `size` is given.
Setting this to True without an appropriately set size will lead to
unexpected results.
`shorten_from`: str, defaults to `center`
The side from which the text should be shortened; can be left,
right, or center. E.g. if left, the ellipsis will appear towards
the left side and it will display as much text starting from the
right as possible.
`split_str`: string, defaults to `' '` (space)
The string to use to split the words by when shortening. If empty,
we can split after every character filling up the line as much as
possible.
`max_lines`: int, defaults to 0 (unlimited)
If set, this indicates the maximum number of lines allowed to render
the text. Works only if a limitation on text_size is set.
`mipmap` : bool, defaults to False
Create a mipmap for the texture
`strip` : bool, defaults to False
Whether each row of text has its leading and trailing spaces
stripped. If `halign` is `justify` it is implicitly True.
`strip_reflow` : bool, defaults to True
Whether text that has been reflowed into a second line should
be stripped, even if `strip` is False. This is only in effect when
`size_hint_x` is not None, because otherwise lines are never
split.
`unicode_errors` : str, defaults to `'replace'`
How to handle unicode decode errors. Can be `'strict'`, `'replace'`
or `'ignore'`.
.. versionchanged:: 1.9.0
`strip`, `strip_reflow`, `shorten_from`, `split_str`, and
`unicode_errors` were added.
.. versionchanged:: 1.9.0
`padding_x` and `padding_y` have been fixed to work as expected.
In the past, the text was padded by the negative of their values.
.. versionchanged:: 1.8.0
`max_lines` parameter has been added.
.. versionchanged:: 1.0.8
`size` has been deprecated and replaced with `text_size`.
.. versionchanged:: 1.0.7
The `valign` is now respected. This wasn't the case previously
so you might have an issue in your application if you have not
considered this.
'''
__slots__ = ('options', 'texture', '_label', '_text_size')
_cached_lines = []
_fonts = {}
_fonts_cache = {}
_fonts_dirs = []
_font_dirs_files = []
_texture_1px = None
def __init__(
self, text='', font_size=12, font_name=DEFAULT_FONT, bold=False,
italic=False, underline=False, strikethrough=False,
halign='left', valign='bottom', shorten=False,
text_size=None, mipmap=False, color=None, line_height=1.0, strip=False,
strip_reflow=True, shorten_from='center', split_str=' ',
unicode_errors='replace',
font_hinting='normal', font_kerning=True, font_blended=True,
**kwargs):
# Include system fonts_dir in resource paths.
# This allows us to specify a font from those dirs.
LabelBase.get_system_fonts_dir()
options = {'text': text, 'font_size': font_size,
'font_name': font_name, 'bold': bold, 'italic': italic,
'underline': underline, 'strikethrough': strikethrough,
'halign': halign, 'valign': valign, 'shorten': shorten,
'mipmap': mipmap, 'line_height': line_height,
'strip': strip, 'strip_reflow': strip_reflow,
'shorten_from': shorten_from, 'split_str': split_str,
'unicode_errors': unicode_errors,
'font_hinting': font_hinting,
'font_kerning': font_kerning,
'font_blended': font_blended}
options['color'] = color or (1, 1, 1, 1)
options['padding'] = kwargs.get('padding', (0, 0))
if not isinstance(options['padding'], (list, tuple)):
options['padding'] = (options['padding'], options['padding'])
options['padding_x'] = kwargs.get('padding_x', options['padding'][0])
options['padding_y'] = kwargs.get('padding_y', options['padding'][1])
if 'size' in kwargs:
options['text_size'] = kwargs['size']
else:
if text_size is None:
options['text_size'] = (None, None)
else:
options['text_size'] = text_size
self._text_size = options['text_size']
self._text = options['text']
self._internal_size = 0, 0  # the real computed text size (includes padding)
self._cached_lines = []
self.options = options
self.texture = None
self.resolve_font_name()
@staticmethod
def register(name, fn_regular, fn_italic=None, fn_bold=None,
fn_bolditalic=None):
'''Register an alias for a Font.
.. versionadded:: 1.1.0
If you're using a ttf directly, you might not be able to use the
bold/italic properties of
the ttf version. If the font is delivered in multiple files
(one regular, one italic and one bold), then you need to register these
files and use the alias instead.
All the fn_regular/fn_italic/fn_bold parameters are resolved with
:func:`kivy.resources.
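
A hedged sketch of the alias registration this docstring describes; the alias name and font file paths are hypothetical.

LabelBase.register('MyFont', 'fonts/MyFont-Regular.ttf',
                   fn_italic='fonts/MyFont-Italic.ttf',
                   fn_bold='fonts/MyFont-Bold.ttf')
# afterwards, labels can use font_name='MyFont' and pick the right file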

repo: pascalweiss/LSFEventScraper | path: LSFFetcher.py | language: Python | license: mit | size: 2,374 | score: 0.001264

from LSFEventType import LSFEventType
__author__ = 'pascal'
from urllib import urlopen
import time
from threading import Thread
from glob import glob
class LSFFetcher:
_overview_url = ''
_event_urls = []
simultaneous_threads = 10
def add_overview_url(self, overview_url):
self._overview_url = overview_url
def add_event_urls(self, event_urls):
for url in event_urls:
if url not in self._event_urls:
self._event_urls.append(url)
def fetch_url(self, url):
result = ''
while True:
try:
result = urlopen(url).read()
break
except Exception as e:
print('Problem with Internet Connection')
time.sleep(2)
return result
def fetch_local_sites(self, callback, event_type=LSFEventType.normal_event):
html = ''
if event_type is LSFEventType.normal_event:
files = glob('data_events/*.html')
else:
files = glob('data_cancels/*.html')
for file in files:
with open(file, 'r') as f:
html = f.read()
callback(html)
def fetch_local_site(self, callback):
html = ''
with open('data_example.html', 'r') as f:
html = f.read()
callback(html)
def fetch_event_overview(self, callback):
event_overview = self.fetch_url(self._overview_url)
callback(event_overview)
def fetch_event_sites(self, callback):
threads = []
for event_url in self._event_urls:
thread = Thread(target=self.fetch_event_site, args=(event_url, callback))
threads.append(thread)
while threads != []:
aux_threads = []
for i in range(self.simultaneous_threads):
try:
aux_threads.append(threads.pop())
except Exception as e:
print(e)
for thread in aux_threads:
thread.start()
print('Fetching: ' + str(len(threads)) + ' sites left.' + ' Active fetching threads: ' + str(self.simultaneous_threads))
for thread in aux_threads:
thread.join()
def fetch_event_site(self, event_url, callback):
event_site = self.fetch_url(event_url)
callback(event_site)
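
A minimal usage sketch of the fetcher above; the URLs are hypothetical placeholders.

def store(html):
    print('fetched %d bytes' % len(html))

fetcher = LSFFetcher()
fetcher.add_event_urls(['http://example.org/lsf/event/1',
                        'http://example.org/lsf/event/2'])
fetcher.fetch_event_sites(store)  # fetched in batches of simultaneous_threads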

repo: hunch/hunch-gift-app | path: django/contrib/gis/db/backends/oracle/adapter.py | language: Python | license: mit | size: 157 | score: 0.006369

from cx_Oracle import CLOB
from django.contrib.gis.db.backends.adapter import WKTAdapter
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB

repo: e-gob/plataforma-kioscos-autoatencion | path: scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/terminal/aireos.py | language: Python | license: bsd-3-clause | size: 1,999 | score: 0.0005

#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
import time
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w]*\(.+\)?[>#\$](?:\s*)$"),
re.compile(br"User:")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"incorrect usage", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
]
def on_open_shell(self):
try:
commands = ('{"command": "' + self._connection._play_context.remote_user + '", "prompt": "Password:", "answer": "' +
self._connection._play_context.password + '"}',
'{"command": "config paging disable"}')
for cmd in commands:
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')

repo: grzes/djangae | path: djangae/db/backends/appengine/dnf.py | language: Python | license: bsd-3-clause | size: 9,768 | score: 0.001536

import copy
from itertools import product
from django.conf import settings
from django.db.models.sql.datastructures import EmptyResultSet
from djangae.db.backends.appengine.query import WhereNode
from django.db import NotSupportedError
# Maximum number of subqueries in a multiquery
DEFAULT_MAX_ALLOWABLE_QUERIES = 100
def preprocess_node(node, negated):
to_remove = []
# Go through the children of this node and if any of the
# child nodes are leaf nodes, then explode them if necessary
for child in node.children:
if child.is_leaf:
if child.operator == "ISNULL":
value = not child.value if node.negated else child.value
if value:
child.operator = "="
child.value = None
else:
child.operator = ">"
child.value = None
elif node.negated and child.operator == "=":
# Excluded equalities become inequalities
lhs, rhs = WhereNode(node.using), WhereNode(node.using)
lhs.column = rhs.column = child.column
lhs.value = rhs.value = child.value
lhs.operator = "<"
rhs.operator = ">"
child.operator = child.value = child.column = None
child.connector = "OR"
child.children = [lhs, rhs]
assert not child.is_leaf
elif child.operator == "IN":
# Explode IN filters into a series of OR statements to make life
# easier later
new_children = []
for value in child.value:
if node.negated:
lhs, rhs = WhereNode(node.using), WhereNode(node.using)
lhs.column = rhs.column = child.column
lhs.value = rhs.value = value
lhs.operator = "<"
rhs.operator = ">"
bridge = WhereNode(node.using)
bridge.connector = "OR"
bridge.children = [lhs, rhs]
new_children.append(bridge)
else:
new_node = WhereNode(node.using)
new_node.operator = "="
new_node.value = value
new_node.column = child.column
new_children.append(new_node)
child.column = None
child.operator = None
child.connector = "AND" if negated else "OR"
child.value = None
child.children = new_children
assert not child.is_leaf
elif child.operator == "RANGE":
lhs, rhs = WhereNode(node.using), WhereNode(node.using)
lhs.column = rhs.column = child.column
if node.negated:
lhs.operator = "<"
rhs.operator = ">"
child.connector = "OR"
else:
lhs.operator = ">="
rhs.operator = "<="
child.connector = "AND"
lhs.value = child.value[0]
rhs.value = child.value[1]
child.column = child.operator = child.value = None
child.children = [lhs, rhs]
assert not child.is_leaf
elif node.negated:
# Move the negation down the tree
child.negated = not child.negated
# If this node was negated, we flip everything
if node.negated:
node.negated = False
node.connector = "AND" if node.connector == "OR" else "OR"
for child in to_remove:
node.children.remove(child)
return node
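
# Leaf rewrites performed above, in summary (comment-only illustration):
#   NOT (col = v)           ->  (col < v) OR (col > v)
#   col IN (v1, v2)         ->  (col = v1) OR (col = v2)
#   col RANGE (lo, hi)      ->  (col >= lo) AND (col <= hi)
#   NOT col RANGE (lo, hi)  ->  (col < lo) OR (col > hi)
#   col ISNULL True         ->  col = None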
def normalize_query(query):
where = query.where
# If there are no filters then this is already normalized
if where is None:
return query
def walk_tree(where, original_negated=False):
negated = original_negated
if where.negated:
negated = not negated
preprocess_node(where, negated)
rewalk = False
for child in where.children:
if where.connector == "AND" and child.children and child.connector == 'AND' and not child.negated:
where.children.remove(child)
where.children.extend(child.children)
rewalk = True
elif child.connector == "AND" and len(child.children) == 1 and not child.negated:
# Promote leaf nodes if they are the only child under an AND. Just for consistency
where.children.remove(child)
where.children.extend(child.children)
rewalk = True
elif len(child.children) > 1 and child.connector == 'AND' and child.negated:
new_grandchildren = []
for grandchild in child.children:
new_node = WhereNode(child.using)
new_node.negated = True
new_node.children = [grandchild]
new_grandchildren.append(new_node)
child.children = new_grandchildren
child.connector = 'OR'
rewalk = True
else:
walk_tree(child, negated)
if rewalk:
walk_tree(where, original_negated)
if where.connector == 'AND' and any([x.connector == 'OR' for x in where.children]):
# ANDs should have been taken care of!
assert not any([x.connector == 'AND' and not x.is_leaf for x in where.children ])
product_list = []
for child in where.children:
if child.connector == 'OR':
product_list.append(child.children)
else:
product_list.append([child])
producted = product(*product_list)
new_children = []
for branch in producted:
new_and = WhereNode(where.using)
new_and.connector = 'AND'
new_and.children = list(copy.deepcopy(branch))
new_children.append(new_and)
where.connector = 'OR'
where.children = list(set(new_children))
walk_tree(where, original_negated)
elif where.connector == 'OR':
new_children = []
for child in where.children:
if child.connector == 'OR':
new_children.extend(child.children)
else:
new_children.append(child)
where.children = list(set(new_children))
walk_tree(where)
if where.connector != 'OR':
new_node = WhereNode(where.using)
new_node.connector = 'OR'
new_node.children = [where]
query._where = new_node
all_pks = True
for and_branch in query.where.children:
if and_branch.is_leaf:
children = [and_branch]
else:
children = and_branch.children
for node in children:
if node.column == "__key__" and node.operator in ("=", "IN"):
break
else:
all_pks = False
break
MAX_ALLOWABLE_QUERIES = getattr(
settings,
"DJANGAE_MAX_QUERY_BRANCHES", DEFAULT_MAX_ALLOWABLE_QUERIES
)
if (not all_pks) and len(query.where.children) > MAX_ALLOWABLE_QUERIES:
raise NotSupportedError(
"Unable to run query as it required more than {} subqueries (limit is configurable with DJANGAE_MAX_QUERY_BRANCHES)".format(
MAX_ALLOWABLE_QUERIES
)
)
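# Hedged sketch of the overall effect: normalize_query rewrites the WHERE tree
# into disjunctive normal form, a single OR whose children are AND branches:
#
#   A AND (B OR C)  -->  OR( AND(A, B), AND(A, C) )
#
# Each AND branch later runs as one datastore query, hence the branch cap
# checked above. The cap is configurable via the setting read by getattr():
#
#   # settings.py (the value here is illustrative)
#   DJANGAE_MAX_QUERY_BRANCHES = 250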
def remove_empty_in(node):
"""
Once we are normalized, if any of the branches filters
on an empty list, we can remove that entire branch from the
query. If this leaves no branches, then the result set is empty
"""
# This is a bit ugly, but you try and do it more succinctly :)
# We have the following possible situations for IN queries with an empty
# value:
# - Negated:
anhstudios/swganh | data/scripts/templates/object/creature/npc/droid/shared_wed_treadwell_base.py | Python | mit | 460 | 0.045652
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
	result.template = "object/creature/npc/droid/shared_wed_treadwell_base.iff"
result.attribute_template_id = 3
result.stfName("droid_name","wed_treadwell_base")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
tnwhitwell/lexicon | tests/test_main.py | Python | mit | 1,149 | 0.008703
import lexicon.__main__
import pytest
def test_BaseProviderParser():
baseparser = lexicon.__main__.BaseProviderParser()
parsed = baseparser.parse_args(['list','capsulecd.com','TXT'])
assert parsed.action == 'list'
assert parsed.domain == 'capsulecd.com'
assert parsed.type == 'TXT'
assert parsed.ttl == None
def test_BaseProviderParser_without_domain():
baseparser = lexicon.__main__.BaseProviderParser()
with pytest.raises(SystemExit):
baseparser.parse_args(['list'])
def test_BaseProviderParser_without_options():
baseparser = lexicon.__main__.BaseProviderParser()
with pytest.raises(SystemExit):
baseparser.parse_args([])
def test_MainParser():
    baseparser = lexicon.__main__.MainParser()
parsed = baseparser.parse_args(['cloudflare','list','capsulecd.com','TXT'])
assert parsed.provider_name == 'cloudflare'
assert parsed.action == 'list'
assert parsed.domain == 'capsulecd.com'
assert parsed.type == 'TXT'
def test_MainParser_without_args():
baseparser = lexicon.__main__.MainParser()
with pytest.raises(SystemExit):
baseparser.parse_args([])
igor-rangel7l/igorrangelteste.repository | script.module.urlresolver/lib/urlresolver/plugins/userscloud.py | Python | gpl-2.0 | 2,595 | 0.004624
# -*- coding: UTF-8 -*-
"""
Copyright (C) 2014 smokdpi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from lib import jsunpack
from urlresolver import common
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
class UsersCloudResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "userscloud"
domains = ["userscloud.com"]
pattern = '(?://|\.)(userscloud\.com)/(?:embed-)?([0-9a-zA-Z/]+)'
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
self.user_agent = common.IE_USER_AGENT
self.net.set_user_agent(self.user_agent)
self.headers = {'User-Agent': self.user_agent}
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
stream_url = None
self.headers['Referer'] = web_url
html = self.net.http_GET(web_url, headers=self.headers).content
r = re.search('>(eval\(function\(p,a,c,k,e,d\).+?)</script>', html, re.DOTALL)
if r:
js_data = jsunpack.unpack(r.group(1))
stream_url = re.findall('<param\s+name="src"\s*value="([^"]+)', js_data)
stream_url += re.findall('file\s*:\s*[\'|\"](.+?)[\'|\"]', js_data)
stream_url = [i for i in stream_url if not i.endswith('.srt')]
if stream_url:
return stream_url[0]
raise UrlResolver.ResolverError('File not found')
def get_url(self, host, media_id):
return 'https://%s/%s' % (host, media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
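# Hedged usage sketch (plugin framework wiring omitted; the URL is made up):
#
#   resolver = UsersCloudResolver()
#   url = 'http://userscloud.com/abc123xyz'
#   if resolver.valid_url(url, 'userscloud.com'):
#       host, media_id = resolver.get_host_and_id(url)
#       stream_url = resolver.get_media_url(host, media_id)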
FrancoisRheaultUS/dipy | dipy/direction/tests/test_pmf.py | Python | bsd-3-clause | 3,786 | 0
import warnings
import numpy as np
import numpy.testing as npt
from dipy.core.gradients import gradient_table
from dipy.core.sphere import HemiSphere, unit_octahedron
from dipy.direction.pmf import SimplePmfGen, SHCoeffPmfGen, BootPmfGen
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
from dipy.reconst.dti import TensorModel
from dipy.sims.voxel import single_tensor
response = (np.array([1.5e3, 0.3e3, 0.3e3]), 1)
def test_pmf_from_sh():
sphere = HemiSphere.from_sphere(unit_octahedron)
pmfgen = SHCoeffPmfGen(np.ones([2, 2, 2, 28]), sphere, None)
# Test that the pmf is greater than 0 for a valid point
pmf = pmfgen.get_pmf(np.array([0, 0, 0], dtype='float'))
npt.assert_equal(np.sum(pmf) > 0, True)
    # Test that the pmf is 0 for invalid points
npt.assert_array_equal(pmfgen.get_pmf(np.array([-1, 0, 0], dtype='float')),
np.zeros(len(sphere.vertices)))
npt.assert_array_equal(pmfgen.get_pmf(np.array([0, 0, 10], dtype='float')),
np.zeros(len(sphere.vertices)))
def test_pmf_from_array():
sphere = HemiSphere.from_sphere(unit_octahedron)
pmfgen = SimplePmfGen(np.ones([2, 2, 2, len(sphere.vertices)]))
# Test that the pmf is greater than 0 for a valid point
pmf = pmfgen.get_pmf(np.array([0, 0, 0], dtype='float'))
npt.assert_equal(np.sum(pmf) > 0, True)
    # Test that the pmf is 0 for invalid points
npt.assert_array_equal(pmfgen.get_pmf(np.array([-1, 0, 0], dtype='float')),
np.zeros(len(sphere.vertices)))
npt.assert_array_equal(pmfgen.get_pmf(np.array([0, 0, 10], dtype='float')),
np.zeros(len(sphere.vertices)))
npt.assert_raises(
ValueError,
lambda: SimplePmfGen(np.ones([2, 2, 2, len(sphere.vertices)])*-1))
def test_boot_pmf():
# This tests the local model used for the bootstrapping.
hsph_updated = HemiSphere.from_sphere(unit_octahedron)
vertices = hsph_updated.vertices
bvecs = vertices
bvals = np.ones(len(vertices)) * 1000
bvecs = np.insert(bvecs, 0, np.array([0, 0, 0]), axis=0)
bvals = np.insert(bvals, 0, 0)
gtab = gradient_table(bvals, bvecs)
voxel = single_tensor(gtab)
data = np.tile(voxel, (3, 3, 3, 1))
point = np.array([1., 1., 1.])
tensor_model = TensorModel(gtab)
boot_pmf_gen = BootPmfGen(data, model=tensor_model, sphere=hsph_updated)
no_boot_pmf = boot_pmf_gen.get_pmf_no_boot(point)
model_pmf = tensor_model.fit(voxel).odf(hsph_updated)
npt.assert_equal(len(hsph_updated.vertices), no_boot_pmf.shape[0])
npt.assert_array_almost_equal(no_boot_pmf, model_pmf)
# test model spherical harmonic order different than bootstrap order
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
csd_model = ConstrainedSphericalDeconvModel(gtab, response,
sh_order=6)
npt.assert_(len(w) == 1)
npt.assert_(issubclass(w[0].category, UserWarning))
npt.assert_("Number of parameters required " in str(w[0].message))
boot_pmf_gen_sh4 = BootPmfGen(data, model=csd_model, sphere=hsph_updated,
sh_order=4)
pmf_sh4 = boot_pmf_gen_sh4.get_pmf(point)
npt.assert_equal(len(hsph_updated.vertices), pmf_sh4.shape[0])
npt.assert_(np.sum(pmf_sh4.shape) > 0)
boot_pmf_gen_sh8 = BootPmfGen(data, model=csd_model, sphere=hsph_updated,
sh_order=8)
pmf_sh8 = boot_pmf_gen_sh8.get_pmf(point)
npt.assert_equal(len(hsph_updated.vertices), pmf_sh8.shape[0])
npt.assert_(np.sum(pmf_sh8.shape) > 0)
if __name__ == '__main__':
npt.run_module_suite()
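# Hedged usage sketch mirroring the constructor signatures exercised above
# (array shape is [x, y, z, n_vertices]; points are in voxel coordinates):
#
#   sphere = HemiSphere.from_sphere(unit_octahedron)
#   pmfgen = SimplePmfGen(np.ones([2, 2, 2, len(sphere.vertices)]))
#   pmf = pmfgen.get_pmf(np.array([0., 0., 0.]))  # positive inside the grid,
#                                                 # all zeros for out-of-bounds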
googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3beta1_generated_test_cases_batch_run_test_cases_sync.py | Python | apache-2.0 | 1,663 | 0.000601
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchRunTestCases
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3beta1_generated_TestCases_BatchRunTestCases_sync]
from google.cloud import dialogflowcx_v3beta1
def sample_batch_run_test_cases():
# Create a client
client = dialogflowcx_v3beta1.TestCasesClient()
# Initialize request argument(s)
request = dialogflowcx_v3beta1.BatchRunTestCasesRequest(
parent="parent_value",
test_cases=['test_cases_value_1', 'test_cases_value_2'],
)
# Make the request
operation = client.batch_run_test_cases(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END dialogflow_v3beta1_generated_TestCases_BatchRunTestCases_sync]
Clinical-Genomics/taboo | genotype/cli/match_cmd.py | Python | mit | 3,397 | 0.000294
"""Cli for matching samples"""
import logging
import math
import click
from genotype.constants import TYPES
from genotype.match.core import compare_analyses
from genotype.store import api
from genotype.store.models import Analysis
LOG = logging.getLogger(__name__)
def log_result(sample_id, result, hide_fail=False):
total_snps = api.snps().count()
cutoff = math.floor(total_snps / 5)
if result.get("mismatch", 0) == 0 or result.get("mismatch", 0) <= cutoff:
log_func = LOG.info
else:
if hide_fail:
log_func = LOG.debug
else:
log_func = LOG.warning
template = "{sample} | matches: {match}, mismatches: {mismatch}, " "unknown: {unknown}"
log_func(
template.format(
sample=sample_id,
match=result.get("match", 0),
mismatch=result.get("mismatch", 0),
unknown=result.get("unknown", 0),
)
)
@click.command("match")
@click.option("-a", "--analysis", default="genotype", type=click.Choice(TYPES))
@click.argument("sample_ids", nargs=-1)
@click.pass_context
def match_cmd(context, sample_ids, analysis):
"""Match genotypes for an analysis against all samples."""
if len(sample_ids) == 0:
LOG.warning("you must supply at least one sample id")
raise click.Abort
sample_id = sample_ids[0]
sample_obj = api.sample(sample_id, notfound_cb=context.abort)
analysis_obj = sample_obj.analysis(analysis)
# compare against all other samples
other_analyses = Analysis.query.filter(Analysis.type != analysis)
if len(sample_ids) > 1:
# compare only with the specified samples
sample_filter = Analysis.sample_id.in_(sample_ids)
other_analyses = other_analyses.filter(sample_filter)
for other_analysis in other_analyses:
result = compare_analyses(analysis_obj, other_analysis)
        log_result(other_analysis.sample_id, result, hide_fail=True)
@click.command("check")
@click.argument("sample_id")
@click.pass_context
def check_cmd(context, sample_id):
"""Check integrity of a sample."""
LOG.info("Running genotype check")
sample_obj = api.sample(sample_id, notfound_cb=context.abort)
# 1. check no calls from genotyping (could be sign of contamination)
total_snps = api.snps().count()
LOG.info("Nr snps in db: %s", total_snps)
cutoff = math.floor(total_snps / 3)
genotype_analysis = sample_obj.analysis("genotype")
if genotype_analysis:
calls = genotype_analysis.check()
if calls["unknown"] >= cutoff:
LOG.warning("genotyping: fail (%s no-calls)", calls["unknown"])
else:
LOG.info("no-calls from genotyping: %s", calls["unknown"])
else:
LOG.info("no genotyping analysis loaded")
# 2. compare genotypes across analyses (sign of sample mixup)
if len(sample_obj.analyses) == 2:
result = sample_obj.compare()
log_result(sample_id, result)
else:
LOG.debug("analyses for samples not loaded")
# 3. check sex determinations
    if sample_obj.sex and sample_obj.sex != "unknown":
if sample_obj.check_sex():
LOG.info("sex determination: pass")
else:
sex_str = "|".join(list(sample_obj.sexes))
LOG.warning("sex determination: fail (%s)", sex_str)
return
LOG.debug("unknown sample sex")
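# Hedged command-line sketch (the top-level entry point that groups these
# click commands lives elsewhere; the name `genotype` is assumed):
#
#   $ genotype match -a genotype SAMPLE_A SAMPLE_B
#   $ genotype check SAMPLE_A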
mattaustin/django-thummer | thummer/templatetags/thummer.py | Python | apache-2.0 | 3,380 | 0
# -*- coding: utf-8 -*-
#
# Copyright 2011-2018 Matt Austin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
import re
from django.conf import settings
from django.template import Library, Node, NodeList, TemplateSyntaxError
from django.utils.encoding import smart_str
from thummer.utils import get_thumbnail
register = Library()
kw_pat = re.compile(r'^(?P<key>[\w]+)=(?P<value>.+)$')
class ThummerNodeBase(Node):
"""
A Node that renders safely
"""
nodelist_empty = NodeList()
def render(self, context):
try:
return self._render(context)
except Exception:
if settings.DEBUG:
raise
# TODO: Log error
return self.nodelist_empty.render(context)
def _render(self, context):
        raise NotImplementedError()
@register.tag('thummer')
class ThummerNode(ThummerNodeBase):
child_nodelists = ('nodelist_url', 'nodelist_empty')
error_msg = ('Syntax error. Expected: ``thummer url geometry '
'[key1=val1 key2=val2...] as var``')
def __init__(self, parser, token):
bits = token.split_contents()
if len(bits) < 5 or bits[-2] != 'as':
raise TemplateSyntaxError(self.error_msg)
self.url = parser.compile_filter(bits[1])
self.geometry = parser.compile_filter(bits[2])
self.options = []
for bit in bits[3:-2]:
m = kw_pat.match(bit)
if not m:
raise TemplateSyntaxError(self.error_msg)
key = smart_str(m.group('key'))
expr = parser.compile_filter(m.group('value'))
self.options.append((key, expr))
self.as_var = bits[-1]
self.nodelist_url = parser.parse(('empty', 'endthummer',))
if parser.next_token().contents == 'empty':
self.nodelist_empty = parser.parse(('endthummer',))
parser.delete_first_token()
def _render(self, context):
url = self.url.resolve(context)
geometry = self.geometry.resolve(context)
options = {}
for key, expr in self.options:
noresolve = {'True': True, 'False': False, 'None': None}
value = noresolve.get('{}'.format(expr), expr.resolve(context))
if key == 'options':
options.update(value)
else:
options[key] = value
if url:
thumbnail = get_thumbnail(url, geometry, **options)
else:
return self.nodelist_empty.render(context)
context.push()
context[self.as_var] = thumbnail
output = self.nodelist_url.render(context)
context.pop()
return output
def __iter__(self):
for node in self.nodelist_url:
yield node
for node in self.nodelist_empty:
yield node
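# Hedged template usage sketch following the syntax documented in error_msg
# above (the `crop` option and the `url` attribute on the thumbnail object
# are assumptions about what get_thumbnail accepts/returns):
#
#   {% thummer object.link "100x100" crop="center" as thumb %}
#       <img src="{{ thumb.url }}">
#   {% empty %}
#       <img src="/static/placeholder.png">
#   {% endthummer %}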
Sefrwahed/alfred-news | alfred_news/models.py | Python | mit | 791 | 0
from alfred.modules.api.a_base_model import ABaseModel
class Article(ABaseModel):
def __init__(self, title, summary, date, url, image):
super().__init__()
self.title = title
        self.summary = summary
self.date = date
self.url = url
self.image = image
class Source(ABaseModel):
def __init__(self, name, url, category_id):
super().__init__()
self.name = name
self.url = url
self.category_id = category_id
class Category(ABaseModel):
def __init__(self, name):
super().__init__()
self.name = name
def sources(self):
lst = []
for source in Source.all():
if str(source.category_id) == str(self.id):
lst.append(source)
return lst
mizdebsk/pkgdb2 | tests/test_groups.py | Python | gpl-2.0 | 9,297 | 0.00043
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
pkgdb tests for the Collection object.
'''
__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources
import json
import unittest
import sys
import os
from mock import patch
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import pkgdb2
import pkgdb2.lib.model as model
from tests import (Modeltests, FakeFasUser,
FakeFasGroupValid, create_package_acl,
create_package_acl2, user_set)
class PkgdbGrouptests(Modeltests):
""" PkgdbGroup tests. """
def setUp(self):
""" Set up the environnment, ran before every tests. """
super(PkgdbGrouptests, self).setUp()
pkgdb2.APP.config['TESTING'] = True
pkgdb2.SESSION = self.session
pkgdb2.api.extras.SESSION = self.session
pkgdb2.ui.SESSION = self.session
pkgdb2.ui.acls.SESSION = self.session
pkgdb2.ui.admin.SESSION = self.session
pkgdb2.ui.collections.SESSION = self.session
pkgdb2.ui.packagers.SESSION = self.session
pkgdb2.ui.packages.SESSION = self.session
self.app = pkgdb2.APP.test_client()
# Let's make sure the cache is empty for the tests
pkgdb2.CACHE.invalidate()
def set_group_acls(self):
''' Create some Group ACLs. '''
fedocal_pkg = model.Package.by_name(self.session, 'rpms', 'fedocal')
devel_collec = model.Collection.by_name(self.session, 'master')
f18_collec = model.Collection.by_name(self.session, 'f18')
pklist_fedocal_f18 = model.PackageListing.by_pkgid_collectionid(
self.session, fedocal_pkg.id, f18_collec.id)
pklist_fedocal_devel = model.PackageListing.by_pkgid_collectionid(
self.session, fedocal_pkg.id, devel_collec.id)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_f18.id,
acl='commit',
status='Approved',
)
self.session.add(packager)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_devel.id,
acl='commit',
status='Approved',
)
self.session.add(packager)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_f18.id,
acl='watchbugzilla',
status='Approved',
)
self.session.add(packager)
packager = model.PackageListingAcl(
fas_name='group::infra-sig',
packagelisting_id=pklist_fedocal_devel.id,
acl='watchbugzilla',
status='Approved',
)
self.session.add(packager)
self.session.commit()
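    # Note: the 'group::' prefix on fas_name marks a FAS group ACL; the
    # bugzilla export tested below renders such entries as '@<group>' in the
    # CC list and owner fields.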
def test_api_bugzilla_group(self):
""" Test the api_bugzilla function. """
create_package_acl2(self.session)
self.set_group_acls()
output = self.app.get('/api/bugzilla/')
self.assertEqual(output.status_code, 200)
expected = """# Package Database VCS Acls
# Text Format
# Collection|Package|Description|Owner|Initial QA|Initial CCList
# Backslashes (\) are escaped as \u005c Pipes (|) are escaped as \u007c
Fedora|fedocal|A web-based calendar for Fedora|pingou||group::infra-sig,pingou
Fedora|geany|A fast and lightweight IDE using GTK2|group::gtk-sig||
Fedora|guake|Top down terminal for GNOME|pingou||spot"""
self.assertEqual(output.data, expected)
output = self.app.get('/api/bugzilla/?format=json')
self.assertEqual(output.status_code, 200)
expected = {
u'bugzillaAcls': {
'Fedora': {
"fedocal": {
"owner": "pingou",
"cclist": {
"groups": ["@infra-sig"],
"people": ["pingou"]
},
"qacontact": None,
"summary": "A web-based calendar for Fedora"
},
'geany': {
'owner': '@gtk-sig',
'cclist': {
'groups': [],
'people': []
},
'qacontact': None,
'summary': 'A fast and lightweight IDE using '
'GTK2'
},
'guake': {
'owner': 'pingou',
'cclist': {
'groups': [],
'people': ['spot']
},
'qacontact': None,
'summary': 'Top down terminal for GNOME'
}
}
},
'title': 'Fedora Package Database -- Bugzilla ACLs'
}
data = json.loads(output.data)
self.assertEqual(data, expected)
@patch('pkgdb2.lib.utils')
@patch('pkgdb2.packager_login_required')
def test_package_give_group(self, login_func, mock_func):
""" Test the package_give function to a group. """
login_func.return_value = None
create_package_acl(self.session)
mock_func.get_packagers.return_value = ['spot']
group = FakeFasGroupValid()
group.name = 'gtk-sig'
mock_func.get_fas_group.return_value = group
mock_func.log.return_value = ''
user = FakeFasUser()
with user_set(pkgdb2.APP, user):
output = self.app.get('/package/rpms/guake/give')
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h1>Give Point of Contact of package: guake</h1>'
in output.data)
self.assertTrue(
'<input id="csrf_token" name="csrf_token"' in output.data)
csrf_token = output.data.split(
'name="csrf_tok
|
en" type="hidden" value="')[1].split('">')[0]
data = {
'branches': 'master',
'poc': 'spot',
'csrf_token': csrf_token,
}
output = self.app.post('/package/rpms/guake/give', data=data,
                                   follow_redirects=True)
self.assertEqual(output.status_code, 200)
self.assertTrue(
'rpms/<span property="doap:name">guake</span>'
in output.data)
self.assertEqual(
output.data.count('<a href="/packager/spot/">'), 2)
user.username = 'spot'
user.groups.append('gtk-sig')
with user_set(pkgdb2.APP, user):
output = self.app.get('/package/rpms/guake/give')
self.assertEqual(output.status_code, 200)
self.assertTrue(
'<h1>Give Point of Contact of package: guake</h1>'
in output.data)
self.assertTrue(
'<input id="csrf_token" name="csrf_token"' in output.data)
csrf_token = output.data.split(
'name="csrf_token" type="hidden" value="')[1].split('">')[0]
reyoung/Paddle | python/paddle/fluid/layers/layer_function_generator.py | Python | apache-2.0 | 10,724 | 0.000466
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re
import functools
import warnings
import string
from six.moves import cStringIO
from ..proto import framework_pb2
from ..framework import OpProtoHolder, Variable
from ..layer_helper import LayerHelper
__all__ = [
'deprecated', 'generate_layer_fn', 'generate_layer_fn_noattr', 'autodoc',
'templatedoc'
]
def _convert_(name):
"""
Formatting.
Args:
name: The name/alias
This function takes in a name and converts it to a standard format of
group1_group2. Where as per the regular expression, group1 can have
alphabets and numbers and group2 has capital alphabets.
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
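# e.g. _convert_('BatchNormOp') -> 'batch_norm_op'; _convert_('FC') -> 'fc'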
def _type_to_str_(tp):
return framework_pb2.AttrType.Name(tp)
_two_dollar_pattern_ = re.compile(r"\$\$([^\$]+)\$\$")
_single_dollar_pattern_ = re.compile(r"\$([^\$]+)\$")
_two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
def escape_math(text):
return _two_bang_pattern_.sub(
r'$$\1$$',
_single_dollar_pattern_.sub(r':math:`\1`',
_two_dollar_pattern_.sub(r"!!\1!!", text)))
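# e.g. escape_math('uses $x^2$ and $$x^2$$')
#        -> 'uses :math:`x^2` and $$x^2$$'
# (single-dollar math becomes an inline :math: role; double-dollar display
# math is preserved via the temporary !!...!! encoding)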
def _generate_doc_string_(op_proto, additional_args_lines=None):
"""
Generate docstring by OpProto
Args:
op_proto (framework_pb2.OpProto): a protobuf message typed OpProto
Returns:
str: the document string
"""
if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("OpPr
|
oto should be `framework_pb2.OpProto`")
buf = cStringIO()
buf.write(escape_math(op_proto.comment))
buf.write('\nArgs:\n')
for each_input in op_proto.inputs:
line_begin = ' {0}: '.format(_convert_(each_input.name))
buf.write(line_begin)
buf.write(escape_math(each_input.comment))
if each_input.duplicable:
buf.write(" Duplicatable.")
if each_input.dispensable:
buf.write(" Optional.")
buf.write('\n')
skip_attrs = OpProtoHolder.generated_op_attr_names()
for each_attr in op_proto.attrs:
if each_attr.name in skip_attrs:
continue
buf.write(' ')
buf.write(each_attr.name)
buf.write(' (')
buf.write(_type_to_str_(each_attr.type))
buf.write('): ')
buf.write(escape_math(each_attr.comment))
buf.write('\n')
if additional_args_lines is not None:
for line in additional_args_lines:
line = line.strip()
buf.write(' ')
buf.write(line)
buf.write('\n')
if len(op_proto.outputs) != 0:
buf.write('\nReturns:\n')
buf.write(' ')
for each_opt in op_proto.outputs:
if not each_opt.intermediate:
break
buf.write(escape_math(each_opt.comment))
return buf.getvalue()
def generate_layer_fn(op_type):
"""Register the Python layer for an Operator.
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sigmoid, mean , average etc) and
creates the operator functionality.
"""
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
not_intermediate_outputs = \
[output for output in op_proto.outputs if not output.intermediate]
intermediate_outputs = \
[output for output in op_proto.outputs if output.intermediate]
if len(not_intermediate_outputs) != 1:
raise ValueError("Only one non intermediate output operator can be",
"automatically generated. {0}".format(op_type))
if not_intermediate_outputs[0].duplicable:
raise ValueError(
"Only non duplicable op can be automatically generated.")
for output in intermediate_outputs:
if output.duplicable:
raise ValueError("The op can be automatically generated only when ",
"all intermediate ops are not duplicable.")
o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs]
def infer_and_check_dtype(op_proto, *args, **kwargs):
"""
This function performs the sanity check for dtype and
instance type.
"""
dtype = None
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0:
val = [args[0]]
args = args[1:]
for each in val:
if not isinstance(each, Variable):
raise ValueError("input of {0} must be variable".format(
op_type))
if dtype is None:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError(
"operator {0} must input same dtype. {1} vs {2}".format(
op_type, dtype, each.dtype))
return dtype
def func(*args, **kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_dtype(op_proto, *args, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0 and len(args) != 0:
val = args[0]
args = args[1:]
inputs[ipt.name] = val
outputs = dict()
out = kwargs.pop(_convert_(o_name), [])
if out:
out_var = out[0] if (isinstance(out, list) or
isinstance(out, tuple)) else out
else:
out_var = helper.create_variable_for_type_inference(dtype=dtype)
outputs[o_name] = [out_var]
for name in intermediate_output_names:
outputs[name] = [
helper.create_variable_for_type_inference(dtype=dtype)
]
helper.append_op(
type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
return helper.append_activation(out_var)
func.__name__ = op_type
func.__doc__ = _generate_doc_string_(op_proto)
return func
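# Hedged usage sketch (the op name is illustrative; any registered operator
# with a single non-intermediate output works):
#
#   mean = generate_layer_fn('mean')
#   out = mean(x)  # x is a fluid Variable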
def generate_layer_fn_noattr(op_type):
"""Register the Python layer for an Operator without Attribute.
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sigmoid, exp , tanh etc) and
creates the operator functionality.
"""
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
def func(x, name=None):
helper = LayerHelper(op_type, **locals())
output = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
return output
func.__name__ = op_type
func.__doc__ = _generate_doc_string_(op_proto)
return func
def deprecated(func_or_class):
"""
Deprecated warning decorator. It will result a warning message.
Should be used before class or function, member function
"""
    @functools.wraps(func_or_class)
def func_wrapper(*args, **kwargs):
"""
Wrap func with deprecated warning
"""
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(
eufarn7sp/egads-eufar | egads/algorithms/thermodynamics/temp_virtual_cnrm.py | Python | bsd-3-clause | 3,161 | 0.016134
__author__ = "mfreer"
__date__ = "2011-05-27 14:27"
__version__ = "1.0"
__all__ = ["TempVirtualCnrm"]
import egads.core.egads_core as egads_core
import egads.core.metadata as egads_metadata
class TempVirtualCnrm(egads_core.EgadsAlgorithm):
"""
FILE temp_virtual_cnrm.py
VERSION 1.0
CATEGORY Thermodynamics
PURPOSE Calculate virtual temperature
    DESCRIPTION Calculates virtual temperature given static temperature and mixing ratio.
INPUT T_s vector K or C static temperature
r vector g/kg water vapor mixing ratio
OUTPUT T_v vector K or C virtual temperature
SOURCE CNRM/GMEI/TRAMM
REFERENCES Triplet-Roche, page 56.
"""
def __init__(self, return_Egads=True):
egads_core.EgadsAlgorithm.__init__(self, return_Egads)
self.output_metadata = egads_metadata.VariableMetadata({'units':'K',
'long_name':'virtual temperature',
'standard_name':'virtual_temperature',
'Category':['Thermodynamics','Atmos State']})
self.metadata = egads_metadata.AlgorithmMetadata({'Inputs':['T_s', 'r'],
'InputUnits':['K','g/kg'],
'InputTypes':['vector','vector'],
'InputDescription':['Static temperature','Water vapor mixing ratio'],
'Outputs':['T_v'],
'OutputUnits':['K'],
'OutputTypes':['vector'],
'OutputDescription':['Virtual temperature'],
'Purpose':'Calculate virtual temperature',
                                                          'Description':'Calculates virtual temperature given static temperature and mixing ratio',
'Category':'Thermodynamics',
'Source':'CNRM/GMEI/TRAMM',
'References':'Triplet-Roche, page 56',
'Processor':self.name,
|
'ProcessorDate':__date__,
'ProcessorVersion':__version__,
'DateProcessed':self.now()},
self.output_metadata)
def run(self, T_s, r):
return egads_core.EgadsAlgorithm.run(self, T_s, r)
def _algorithm(self, T_s, r):
RvRa = 1.608
T_v = T_s * (1 + RvRa * r) / (1 + r)
return T_v
wang1352083/pythontool | python-2.7.12-lib/test/test_curses.py | Python | mit | 11,284 | 0.004165
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call (nearly) every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import os
import sys
import tempfile
import unittest
from test.test_support import requires, import_module, verbose, run_unittest
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
requires('curses')
# If either of these don't exist, skip the tests.
curses = import_module('curses')
import_module('curses.panel')
import_module('curses.ascii')
def requires_curses_func(name):
return unittest.skipUnless(hasattr(curses, name),
'requires curses.%s' % name)
term = os.environ.get('TERM')
# If newterm was supported we could use it instead of initscr and not exit
@unittest.skipIf(not term or term == 'unknown',
"$TERM=%r, calling initscr() may cause exit" % term)
@unittest.skipIf(sys.platform == "cygwin",
"cygwin's curses mostly just hangs")
class TestCurses(unittest.TestCase):
@classmethod
def setUpClass(cls):
if not sys.__stdout__.isatty():
# Temporary skip tests on non-tty
raise unittest.SkipTest('sys.__stdout__ is not a tty')
cls.tmp = tempfile.TemporaryFile()
fd = cls.tmp.fileno()
else:
cls.tmp = None
fd = sys.__stdout__.fileno()
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=fd)
@classmethod
def tearDownClass(cls):
if cls.tmp:
cls.tmp.close()
del cls.tmp
def setUp(self):
if verbose:
# just to make the test output a little more readable
print()
self.stdscr = curses.initscr()
curses.savetty()
def tearDown(self):
curses.resetty()
curses.endwin()
def test_window_funcs(self):
"Test the methods of windows"
stdscr = self.stdscr
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
        stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
with self.assertRaises(TypeError,
msg="Expected win.border() to raise TypeError"):
win.border(65, 66, 67, 68,
69, [], 71, 72)
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 2, 1, 3, 3)
win2.overwrite(win, 1, 2, 2, 1, 3, 3)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def test_module_funcs(self):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
with tempfile.TemporaryFile() as f:
self.stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
def test_colors_funcs(self):
if not curses.has_colors():
            self.skipTest('requires colors support')
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
@requires_curses_func('keyname')
terranum-ch/GraphLink | graphlink/core/gk_graphic.py | Python | apache-2.0 | 1,424 | 0.002107
#!/urs/bin/python
import os
import graphviz
# from gk_node import GKNode
# from gk_link import GKLink
class GKGraphic(object):
"""Manage graphic"""
def __init__(self, label=None):
self.m_label = label
self.m_nodes_list = []
self.m_link_list = []
def add_link(self, link):
"""add link and related node to the diagram"""
if link is None:
return False
self.m_link_list.append(link)
if link.m_node1 not in self.m_nodes_list:
self.m_nodes_list.append(link.m_node1)
if link.m_node2 not in self.m_nodes_list:
self.m_nodes_list.append(link.m_node2)
return True
def render(self, filename, extension="pdf", size=None):
"""generate the graphic and save result as an image"""
if filename is None:
return False
if size:
size_str = str(size / 100.0) + "!"
dot = graphviz.Graph(comment=self.m_label, format=extension, graph_attr={"size": size_str})
else:
dot = graphviz.Graph(comment=self.m_label, format=extension)
# create the nodes for the nodes_list items
for node in self.m_nodes_list:
node.create_node(dot)
# create the link for the link list
for link in self.m_link_list:
link.create_link(dot)
dot.render(filename, cleanup=True)
return True
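# Hedged usage sketch (GKNode/GKLink come from the modules commented out in
# the imports above; their constructor signatures are assumed here):
#
#   n1, n2 = GKNode("A"), GKNode("B")
#   graph = GKGraphic(label="demo")
#   graph.add_link(GKLink(n1, n2))
#   graph.render("demo_graph", extension="png", size=400)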
dashmoment/facerecognition | py/apps/scripts/fisherfaces_example.py | Python | bsd-3-clause | 2,253 | 0.008877
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import sys
# append facerec to module search path
sys.path.append("../..")
# import facerec stuff
from facerec.dataset import DataSet
from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance, CosineDistance
from facerec.classifier import NearestNeighbor
from facerec.classifier import SVM
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
from facerec.util import minmax_normalize
# import numpy
import numpy as np
# import matplotlib colormaps
import matplotlib.cm as cm
# import for logging
import logging,sys
# set up a handler for logging
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add handler to facerec modules
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# load a dataset (e.g. AT&T Facedatabase)
dataSet = DataSet("/home/philipp/facerec/data/yalefaces_recognition")
# define Fisherfaces as feature extraction method
feature = Fisherfaces()
# define a 1-NN classifier with Euclidean Distance
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# define the model as the combination
model = PredictableModel(feature=feature, classifier=classifier)
# show fisherfaces
model.compute(dataSet.data, dataSet.labels)
# turn the first (at most) 16 eigenvectors into grayscale
# images (note: eigenvectors are stored by column!)
E = []
for i in xrange(min(model.feature.eigenvectors.shape[1], 16)):
    e = model.feature.eigenvectors[:,i].reshape(dataSet.data[0].shape)
E.append(minmax_normalize(e,0,255, dtype=np.uint8))
# plot them and store the plot to "python_fisherfaces_fisherfaces.pdf"
subplot(title="Fisherfaces", images=E, rows=4, cols=4, sptitle="Fisherface", colormap=cm.jet, filename="fisherfaces.pdf")
# perform a 10-fold cross validation
cv = KFoldCrossValidation(model, k=10)
cv.validate(dataSet.data, dataSet.labels)
cv.print_results()
liosha2007/plone-groupdocs-comparison-source | src/groupdocs/comparison/testing.py | Python | apache-2.0 | 653 | 0
from plone.app.testing import PloneWithPackageLayer
from plone.app.testing import IntegrationTesting
from plone.app.testing import FunctionalTesting
import groupdocs.comparison
GROUPDOCS_COMPARISON = PloneWithPackageLayer(
zcml_package=groupdocs.comparison,
zcml_filename='testing.zcml',
gs_profile_id='groupdocs.comparison:testing',
name="GROUPDOCS_COMPARISON")
GROUPDOCS_COMPARISON_INTEGRATION = IntegrationTesting(
bases=(GROUPDOCS_COMPARISON, ),
name="GROUPDO
|
CS_COMPARISON_INTEGRATION")
GROUPDOCS_COMPARISON_FUNCTIONAL = FunctionalTesting(
bases=(GROUPDOCS_COMPARISON, ),
name="GROUPDOCS_COMPARISON_FUNCTIONAL")
yelley/sssd-gpo | src/config/SSSDConfigTest.py | Python | gpl-3.0 | 71,528 | 0.001566
#!/usr/bin/python
'''
Created on Sep 18, 2009
@author: sgallagh
'''
import unittest
import os
from stat import *
import sys
srcdir = os.getenv('srcdir')
if srcdir:
sys.path.insert(0, "./src/config")
srcdir = srcdir + "/src/config"
else:
srcdir = "."
import SSSDConfig
class SSSDConfigTestValid(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Validate services
services = sssdconfig.list_services()
self.assertTrue('sssd' in services)
self.assertTrue('nss' in services)
self.assertTrue('pam' in services)
#Verify service attributes
sssd_service = sssdconfig.get_service('sssd')
service_opts = sssd_service.list_options()
self.assertTrue('services' in service_opts.keys())
service_list = sssd_service.get_option('services')
self.assertTrue('nss' in service_list)
self.assertTrue('pam' in service_list)
self.assertTrue('domains' in service_opts)
self.assertTrue('reconnection_retries' in service_opts)
del sssdconfig
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
sssdconfig.delete_service('sssd')
new_sssd_service = sssdconfig.new_service('sssd');
new_options = new_sssd_service.list_options();
self.assertTrue('debug_level' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('command' in new_options)
self.assertEquals(new_options['command'][0], str)
self.assertTrue('reconnection_retries' in new_options)
self.assertEquals(new_options['reconnection_retries'][0], int)
self.assertTrue('services' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('domains' in new_options)
self.assertEquals(new_options['domains'][0], list)
self.assertEquals(new_options['domains'][1], str)
self.assertTrue('sbus_timeout' in new_options)
self.assertEquals(new_options['sbus_timeout'][0], int)
self.assertTrue('re_expression' in new_options)
self.assertEquals(new_options['re_expression'][0], str)
self.assertTrue('full_name_format' in new_options)
self.assertEquals(new_options['full_name_format'][0], str)
self.assertTrue('default_domain_suffix' in new_options)
self.assertEquals(new_options['default_domain_suffix'][0], str)
del sssdconfig
def testDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
#Validate domain list
domains = sssdconfig.list_domains()
self.assertTrue('LOCAL' in domains)
self.assertTrue('LDAP' in domains)
self.assertTrue('PROXY' in domains)
self.assertTrue('IPA' in domains)
#Verify domain attributes
ipa_domain = sssdconfig.get_domain('IPA')
domain_opts = ipa_domain.list_options()
self.assertTrue('debug_level' in domain_opts.keys())
self.assertTrue('id_provider' in domain_opts.keys())
self.assertTrue('auth_provider' in domain_opts.keys())
del sssdconfig
def testListProviders(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
junk_domain = sssdconfig.new_domain('junk')
providers = junk_domain.list_providers()
self.assertTrue('ldap' in providers.keys())
def testCreateNewLocalConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
local_domain = sssdconfig.new_domain('LOCAL')
local_domain.add_provider('local', 'id')
local_domain.set_option('debug_level', 1)
local_domain.set_option('default_shell', '/bin/tcsh')
local_domain.set_active(True)
sssdconfig.save_domain(local_domain)
of = '/tmp/testCreateNewLocalConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testCreateNewLDAPConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
ldap_domain = sssdconfig.new_domain('LDAP')
ldap_domain.add_provider('ldap', 'id')
ldap_domain.set_option('debug_level', 1)
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = '/tmp/testCreateNewLDAPConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testModifyExistingConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
ldap_domain.set_option('debug_level', 3)
ldap_domain.remove_provider('auth')
ldap_domain.add_provider('krb5', 'auth')
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = '/tmp/testModifyExistingConfig.conf'
#Ensure the output file doesn't exist
try:
            os.unlink(of)
except:
pass
#Write out the file
        sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0177)
#Remove the output file
os.unlink(of)
def testSpaces(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
self.assertEqual(ldap_domain.get_option('auth_provider'), 'ldap')
self.assertEqual(ldap_domain.get_option('id_provider'), 'ldap')
class SSSDConfigTestInvalid(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBadBool(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-invalid-badbool.conf")
self.assertRaises(TypeError,
sssdconfig.get_domain,'IPA')
class SSSDConfigTestSSSDService(unittest.TestCase):
def setUp(self):
self.schema = SSSDConfig.SSSDConfigS
simontakite/sysadmin | pythonscripts/webprogrammingwithpython/Working Files/Chapter 2/0206 loops.py.py | Python | gpl-2.0 | 381 | 0.015748
#number = 1
#while number < 11:
# print(number)
# number += 1
# balance = 1000
# rate = 1.02
# years = 0
# while balance < 5000:
# balance *= rate
# years += 1
# print("It takes " + str(years) + " years to reach $5000.")
# for i in [1,2,3,4,5,6,7,8,9,10]:
# print(i)
#for name in ["Jane", "John", "Matt", "George"]:
# print(name)
for i in range(1,11):
print(i)
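# The commented-out compound-interest example above, runnable as written:
balance = 1000
rate = 1.02
years = 0
while balance < 5000:
    balance *= rate
    years += 1
print("It takes " + str(years) + " years to reach $5000.")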
Finn10111/PimuxBot | pimuxbot.py | Python | gpl-3.0 | 7,396 | 0.001893
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pip3 install dnspython
import re
import random
import sleekxmpp
import configparser
import smtplib
from email.mime.text import MIMEText
from sqlalchemy import Column, String, Integer, Boolean
from sqlalchemy.ext.declarative import declarative_base
from collections import OrderedDict
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class PimuxBot(sleekxmpp.ClientXMPP):
"""
    This XMPP bot will get your commands and do the associated actions.
"""
def __init__(self, s, jid, password):
self.s = s
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.add_event_handler("session_start", self.start)
self.add_event_handler("message", self.message)
def start(self, event):
self.send_presence()
self.get_roster()
def message(self, msg):
if msg['type'] == 'chat':
pm = PimuxManager(self.s, msg['from'], msg['body'])
reply = pm.process()
msg.reply(reply).send()
class PimuxManager(object):
"""
Management class for this server
"""
commands = OrderedDict([
('help', 'prints this help message'),
('status', 'prints status'),
('setmail', 'sets e-mail address for password recovery'),
('code', 'validates e-mail address for password recovery via code')
])
def __init__(self, s, jid, body):
self.s = s
self.jid = re.sub(r'/+.*$', '', str(jid))
self.body = body
if config.getboolean('System', 'debug'):
print('message from %s received' % self.jid)
if re.match('^.*@pimux.de$', self.jid):
self.isPimuxUser = True
else:
self.isPimuxUser = False
self.config = configparser.RawConfigParser()
self.config.read('pimuxbot.cfg')
def process(self):
if self.isPimuxUser:
command = self.__getCommand()
if command in self.commands.keys():
if command == 'help':
message = self.__help()
elif command == 'status':
message = self.__getStatus()
elif command == 'setmail':
email = self.__getParam()
if email:
message = self.__setMail(email)
else:
message = 'usage: setmail foobar@example.org'
elif command == 'code':
code = self.__getParam()
if code:
message = self.__validateCode(code)
else:
message = 'code not found'
else:
message = ('Unknown command. Type "help" for a list of '
'commands.')
else:
message = ('Sorry, I don\'t talk to strangers. You need an '
'account at pimux.de which you can register for free.')
return message
def __getCommand(self):
command = self.body.split(' ', 1)[0]
return command
def __getParam(self):
try:
param = self.body.split(' ', 1)[1]
except IndexError:
param = False
return param
def __help(self):
helptext = 'Hello %s, I am the bot of pimux.de.\n' % self.jid
helptext += 'available commands:\n\n'
for key in self.commands:
helptext += key + ': ' + self.commands[key] + '\n'
return helptext
def __getStatus(self):
re = self.s.query(RecoveryEmail).filter(RecoveryEmail.jid==self.jid).one_or_none()
if re:
message = 'Current password recovery e-mail: %s' % re.email
if re.confirmed:
message += "\nYour e-mail address was successfully validated."
else:
message += "\nYour e-mail address was NOT validated yet and cannot be used."
else:
message = 'No password recovery e-mail configured.'
return message
def __sendMail(self, to, subject, message):
sender = 'pimux@pimux.de'
msg = MIMEText(message, 'plain')
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = to
s = smtplib.SMTP('localhost')
s.ehlo()
s.sendmail(sender, to, msg.as_string())
s.quit()
def __setMail(self, email):
code = random.randint(1000,9999)
re = RecoveryEmail(jid=self.jid, email=email, code=code)
self.s.merge(re)
self.s.commit()
msg = (
'Please verify your e-mail address by sending '
'"code %s" via XMPP back.'
) % str(code)
self.__sendMail(email, 'verification code for pimux.de', msg)
message =(
'A confirmation code was sent to %s. '
'Please now send "code XXXX" back where XXXX is your '
'code to verify your e-mail address.'
) % email
return message
def __validateCode(self, code):
        # filter() returns a Query object, which is always truthy; fetch the
        # matching row explicitly so the else branch below can ever be taken
        re = self.s.query(RecoveryEmail).filter(RecoveryEmail.jid==self.jid, RecoveryEmail.code==code).one_or_none()
if re:
re = RecoveryEmail(jid=self.jid, confirmed=True, code=None)
self.s.merge(re)
self.s.commit()
message = 'code valid'
else:
message = 'code invalid'
return message
class RecoveryEmail(Base):
__tablename__ = 'recovery_email'
jid = Column(String(255), primary_key=True)
email = Column(String(255), nullable=False)
confirmed = Column(Boolean, default=False)
code = Column(Integer, nullable=True)
if __name__ == '__main__':
config = configparser.RawConfigParser()
config.read('/etc/pimuxbot.cfg')
jid = config.get('Account', 'jid')
password = config.get('Account', 'password')
db_user = config.get('DB', 'username')
db_pass = config.get('DB', 'password')
db_host = config.get('DB', 'host')
db_name = config.get('DB', 'name')
db_type = config.get('DB', 'type')
# test if the db type is even set
try:
db_type
# if it is not set print an error
except NameError:
if config.getboolean('System', 'debug'):
print('Database Type is not set.')
if db_type == 'postgres':
engine = create_engine('postgresql://%s:%s@%s/%s' % (db_user, db_pass, db_host, db_name))
if db_type == 'mysql':
engine = create_engine('mysql+mysqlconnector://%s:%s@%s/%s' % (db_user, db_pass, db_host, db_name))
session = sessionmaker()
session.configure(bind=engine)
Base.metadata.create_all(engine)
s = session()
xmpp = PimuxBot(s, jid, password)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0060') # PubSub
xmpp.register_plugin('xep_0199') # XMPP Ping
#xmpp.ca_certs = config.get('System', 'capath')
# Connect to the XMPP server and start processing XMPP stanzas.
if config.getboolean('System', 'debug'):
print('beginning connection as %s' % jid)
if xmpp.connect(reattempt=True):
if config.getboolean('System', 'debug'):
print('connected as %s' % jid)
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
ebertti/nospam | contador_linhas.py | Python | mit | 581 | 0.005164
# coding=utf-8
import os
import glob
import configuracao
def main():
for arquivo in glob.glob(configuracao.DATASET_PREPARADO + '/*.csv'):
linhas = 0
spam = 0
        with open(arquivo, 'r') as arquivo_aberto:
tupla = arquivo_aberto.readline()
while tupla:
linhas += 1
if str(tupla).endswith('True"\
|
n'):
spam += 1
tupla = arquivo_aberto.readline()
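        # emit one summary line per file:
        # "<first two chars of file name>,<total rows>,<rows ending in True">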
print os.path.basename(arquivo)[:2] + ',' + str(linhas) + ',' + str(spam)
if __name__ == "__main__":
main()
|
allmende/synnefo
|
snf-astakos-app/astakos/test/stress.py
|
Python
|
gpl-3.0
| 6,652
| 0.000903
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from optparse import OptionParser
from time import sleep
import threading
import datetime
from random import choice, randint
import logging
path = os.path.dirname(os.path.realpath(__file__))
os.environ['SYNNEFO_SETTINGS_DIR'] = path + '/settings'
os.environ['DJANGO_SETTINGS_MODULE'] = 'synnefo.settings'
from astakos.im import transaction
from astakos.im.models import AstakosUser
from astakos.im.functions import ProjectError
from astakos.im import auth
from astakos.im import activation_backends
from views import submit, approve, join, leave
USERS = {}
PROJECTS = {}
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def random_name():
alphabet = u'abcdef_123490αβγδεζ'
length = randint(1, 15)
return ''.join(choice(alphabet) for _ in xrange(length))
def random_email():
alphabet = u'abcdef_123490'
length = randint(1, 10)
first = ''.join(choice(alphabet) for _ in xrange(length))
    alphabet = u'abcdef'
length = randint(2, 4)
last = ''.join(choice(alphabet) for _ in xrange(length))
return first + '@' + last + '.com'
def new_user():
email = random_email()
backend = activation_backends.get_backend()
try:
AstakosUser.objects.get(email=email)
return None
except AstakosUser.DoesNotExist:
u = auth.make_local_user(email, first_name=random_name(),
last_name=random_name())
backend.verify_user(u, u.verification_code)
backend.accept_user(u)
return u
@transaction.commit_on_success
def new_users(count):
for i in range(count):
while True:
u = new_user()
if u is not None:
USERS[u.id] = u
break
class SubmitApproveT(threading.Thread):
def __init__(self, *args, **kwargs):
self.repeat = kwargs.pop('repeat', 1)
threading.Thread.__init__(self, *args, **kwargs)
def run(self):
owner = choice(USERS.keys())
p_name = random_name()
submit_and_approve(p_name, owner, None, self.repeat,
prefix=self.name)
def submit_and_approve(name, user_id, project_id, repeat, prefix=""):
if prefix:
prefix += ' '
for i in range(repeat):
try:
now = datetime.datetime.now()
logger.info('%s%s: submitting for project %s'
% (prefix, now, project_id))
app_id, project_id = submit(name, user_id, project_id)
except ProjectError as e:
logger.info(e.message)
continue
except Exception as e:
logger.exception(e)
continue
try:
now = datetime.datetime.now()
logger.info('%s%s: approving application %s of project %s'
% (prefix, now, app_id, project_id))
approve(app_id)
PROJECTS[project_id] = True
except Exception as e:
logger.exception(e)
class JoinLeaveT(threading.Thread):
def __init__(self, *args, **kwargs):
self.repeat = kwargs.pop('repeat', 1)
threading.Thread.__init__(self, *args, **kwargs)
def run(self):
user = choice(USERS.values())
while True:
projects = PROJECTS.keys()
if projects:
pid = choice(projects)
break
sleep(0.1)
join_and_leave(pid, user, self.repeat, prefix=self.name)
def join_and_leave(proj_id, user, repeat, prefix=""):
user_id = user.id
if prefix:
prefix += ' '
for i in range(repeat):
try:
now = datetime.datetime.now()
logger.info('%s%s: user %s joining project %s'
% (prefix, now, user_id, proj_id))
membership = join(proj_id, user)
except ProjectError as e:
logger.info(e.message)
continue
except Exception as e:
logger.exception(e)
continue
try:
now = datetime.datetime.now()
logger.info('%s%s: user %s leaving project %s'
% (prefix, now, user_id, proj_id))
leave(membership.id, user)
except ProjectError as e:
logger.info(e.message)
except Exception as e:
logger.exception(e)
def test(users, projects, memb, repeat):
logging.basicConfig()
new_users(users)
for i in range(projects):
SubmitApproveT(repeat=repeat).start()
for i in range(memb):
JoinLeaveT(repeat=repeat).start()
for thread in threading.enumerate():
if thread is not threading.currentThread():
thread.join()
def main():
parser = OptionParser()
parser.add_option('--users',
dest='users',
default=2,
help="Number of users (default=2)")
parser.add_option('--projects',
dest='projects',
default=2,
help="Number of projects (default=2)")
parser.add_option('--memb',
dest='memb',
default=2,
help="Number of membership requests (default=2)")
parser.add_option('--repeat',
dest='repeat',
default=20,
help="Number of iterations (default=20)")
parser.add_option('-q', '--quiet',
action='store_true',
dest='quiet',
default=False,
help="Print only errors")
(options, args) = parser.parse_args()
if options.quiet:
logger.setLevel(logging.WARNING)
users = int(options.users)
projects = int(options.projects)
memb = int(options.memb)
repeat = int(options.repeat)
test(users, projects, memb, repeat)
if __name__ == "__main__":
main()
|
thedod/gistodon
|
gistodon.py
|
Python
|
gpl-3.0
| 8,763
| 0.005592
|
import os, sys, re, argparse, time, json, logging
import requests
from glob import glob
from urlparse import urlsplit
from getpass import getpass
from mastodon import Mastodon
from markdown import markdown
from html_text import extract_text
from flask import (Flask, render_template, abort,
request, redirect, jsonify)
DEBUG = False # If it ain't broke, don't debug it.
NO_TOOTING = False # Handy during debug: create gist, but don't toot.
RE_HASHTAG = re.compile(u'(?:^|(?<=\s))#(\\w+)')
RE_MENTION = re.compile(u'(?:^|(?<=\s))@(\\w+)@([\\w.]+)')
def get_hashtags(s, ignore=None):
tags = set(
['#'+tag.lower() for tag in RE_HASHTAG.findall(s)])
if ignore:
tags -= get_hashtags(ignore)
return tags
def linkify_hashtags(s, instance):
return RE_HASHTAG.sub(
lambda m:
u"[#{tag}](https://{instance}/tags/{tag})".format(
tag=m.group(1), instance=instance),
s)
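# For example (illustrative values):
#   linkify_hashtags(u'hello #World', 'mastodon.social')
#   -> u'hello [#World](https://mastodon.social/tags/World)'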
def get_mentions(s, ignore=None):
mentions = set(
[u"@{}@{}".format(user,instance)
for user, instance in RE_MENTION.findall(s)])
if ignore:
mentions -= get_mentions(ignore)
return mentions
def linkify_mentions(s):
return RE_MENTION.sub(
lambda m:
u"[@{user}](https://{instance}/@{user})".format(
user=m.group(1), instance=m.group(2)),
s)
def url2toot(masto, url):
u = urlsplit(url)
if not (u.scheme=='https' and u.netloc and u.path):
return None # Don't bother the instance
res = masto.search(url, True)
res = res.get('statuses',[])
return res and res[0] or None
def make_gist(title, body):
return requests.post(
"https://api.github.com/gists",
json={
"description": title,
"public": True,
"files": {
"TOOT.md": {
"content": u"### {}\n\n{}".format(title, body)
}
}
}
).json()['html_url']+"#file-toot-md"
def post(masto, body, instance, title=None,
direction='ltr', in_reply_to=None):
# Markdown more than we need, to [hopefully] discard chopped markup.
summary = extract_text(markdown(body.strip()))[:140]
hashtags = get_hashtags(body, ignore=summary)
mentions = get_mentions(body, ignore=summary)
irt_id = in_reply_to and in_reply_to.get('id') or None
body = linkify_hashtags(linkify_mentions(body), instance)
if direction=='rtl':
body = u"""<div dir="rtl">
{}
</div>""".format(markdown(body))
if in_reply_to:
body = u"""#### In reply to [@{}]({}):
{}""".format(
in_reply_to['account']['username'],
in_reply_to['url'], body)
gist = make_gist(
title or u"A gistodon toot, {} GMT".format(
time.asctime(time.gmtime())),
body+u"""
###### Generated by [Gistodon](https://github.com/thedod/gistodon/#readme).""")
if NO_TOOTING:
return gist
status = u'{}... {}'.format(summary, gist)
if hashtags or mentions:
status += u'\n'+u' '.join(hashtags.union(mentions))
return masto.status_post(
status, spoiler_text=title, in_reply_to_id=irt_id)['url']
def webserver(masto, instance, account):
app = Flask(__name__, static_url_path='')
@app.route('/')
def index():
re = request.args.get('re','')
return render_template('index.html', account=account,
re=re)
@app.route('/toot', methods=['POST'])
def tootit():
if not request.form['markdown'].strip():
return "Nothing to toot"
in_reply_to=request.form.get('re')
if in_reply_to:
in_reply_to = url2toot(masto, in_reply_to)
if not in_reply_to:
abort(500, 'The "in reply to" url is not a toot.')
return redirect(post(
masto, request.form['markdown'], instance,
title=request.form['title'],
in_reply_to=in_reply_to,
direction=request.form['direction']))
@app.route('/re', methods=['GET', 'POST'])
def tootsearch():
return jsonify(url2toot(masto,
request.form.get('q', request.args.get('q',''))))
@app.route('/search', methods=['GET', 'POST'])
def search():
q = request.form.get(
'q', request.args.get('q','')).strip()
if not q:
return jsonify([])
res = masto.search(q, True)
return jsonify(sorted(
[
{
# This trick makes sure both local and external
# accounts get a @hostname suffix.
"value": "@{}@{}".format(
a["username"], urlsplit(a["url"]).netloc),
"title": a.get("display_name")
} for a in res.get('accounts',[])]+ \
[{"value": '#'+a} for a in res.get('hashtags',[])],
key=lambda s: s['value'].lower()))
app.run(host='localhost', port=8008, debug=DEBUG)
def main():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
parser = argparse.ArgumentParser(
description=("Toot stdin as a gist [markdown is supported],"
" or launch a localhost web interface."))
parser.add_argument('-i', '--instance',
help='Your mastodon instance (e.g. mastodon.social).')
parser.add_argument('-e', '--email',
help='The email address you login to that instance with.')
parser.add_argument('-a', '--app_name', default='Gistodon',
help=('Name for the app (default is Gistodon).'
' Appears below the toot, near the date.'))
parser.add_argument('-w', '--web', action="store_true",
help=("Run as a web server on localhost"
" (toot-specific --title, --re, and --rtl"
" are ignored)."))
parser.add_argument('-t', '--title',
help="Optional: gist's title, and the toot's content warning (CW).")
parser.add_argument('-r', '--re',
help="Optional: url of the toot you're replying to.")
parser.add_argument('--rtl', dest='direction', action='store_const',
const='rtl', default='ltr',
help=("Format the gist as right-to-left text."))
args = parser.parse_args()
instance = args.instance
if instance:
client_cred_filename = '{}.{}.client.secret'.format(args.app_name, args.instance)
else:
candidates = glob('{}.*.client.secret'.format(args.app_name))
assert candidates, "No app/user registered. Please run register.sh first."
        client_cred_filename = candidates[0]
instance = client_cred_filename[len(args.app_name)+1:-len('.client.secret')]
email = args.email
if email:
user_cred_filename = '{}.{}.{}.user.secret'.format(
args.app_name, instance, email.replace('@','.'))
else:
candidates = glob('{}.{}.*.user.secret'.format(
args.app_name, instance))
assert len(candidates), \
"No user registered for {} at {}. Please run reg
|
ister.sh first.".format(
args.app_name, instance)
user_cred_filename = candidates[0]
assert \
os.path.exists(client_cred_filename) and \
os.path.exists(user_cred_filename), \
"App/user not registered. Please run register.sh"
logging.info("Connecting to {}...".format(instance))
masto = Mastodon(
client_id = client_cred_filename,
access_token = user_cred_filename,
api_base_url = 'https://'+instance)
if args.web:
account = masto.account_verify_credentials()
webserver(masto, instance, account)
else:
logging.info("Reading markdown from standard input...")
lines = [unicode(l,'utf-8') for l in sys.stdin.readlines()]
assert len(filter(lambda l: l.strip(), lines)), \
"Empty toot."
body = u'\n'.join(lines)
assert not
|
Salamek/git-deploy
|
git_deploy/git_deploy_remote.py
|
Python
|
gpl-3.0
| 2,740
| 0.015693
|
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__="Adam Schubert"
__date__ ="$6.7.2014 0:22:44$"
import os
import git_deploy
import threading
from classes import Git
from classes import Shell
class DeployWorker(threading.Thread):
    def __init__(self, caller):
        super(DeployWorker, self).__init__()
        self.caller = caller
        # per-instance work queue (a class-level list would be shared
        # by every worker thread)
        self.work_list = []
    def run(self):
        while self.work_list:
current, branch, ssh_path, tmp = self.work_list.pop()
try:
self.sync(tmp, branch, ssh_path)
self.caller.loger(self.deploy(tmp))
except Exception as e:
self.caller.fail(str(e))
def add_work(self, work):
self.work_list.append(work)
"""
* Method sync local TMP with main repo
"""
def sync(self, tmp, branch, ssh_path):
git = Git(tmp)
git.update(branch, ssh_path)
"""
* Method calls local deployer
* @throws Exception
"""
def deploy(self, tmp):
return git_deploy.GitDeploy(tmp).get_log()
class GitDeployRemote:
workers = {}
"""
* Constructor
    * @param current  pushed revision
    * @param branch   pushed ref (only the final path component is used)
    * @param ssh_path SSH URL of the main repository
    * @param config   configuration dict (uses config['hook']['tmp_path'])
    """
def __init__(self, current, branch, ssh_path, config):
branch = branch.split('/')[-1]
tmp_path = config['hook']['tmp_path']
#Separate tmp repos per branch
parsed_path = Git.git_url_parse(ssh_path)
tmp = os.path.join(tmp_path, parsed_path['hostname'], parsed_path['path'], branch)
        #tmp is a unique identifier of the repo; one worker front per repo
if tmp in self.workers:
w = self.workers[tmp]
if w.isAlive():
w.add_work([current, branch, ssh_path, tmp])
else:
self.workers[tmp] = DeployWorker(self)
self.workers[tmp].add_work([current, branch, ssh_path, tmp])
self.workers[tmp].start()
else:
self.workers[tmp] = DeployWorker(self)
self.workers[tmp].add_work([current, branch, ssh_path, tmp])
self.workers[tmp].start()
#clean not running workers
for tmp in self.workers.keys():
if self.workers[tmp].isAlive() == False:
del self.workers[tmp]
def loger(self, l):
l.output()
def fail(self, fail):
Shell.color(fail, 'white', 'red')
|
arante/udacity
|
cs101/lesson3/different_stooges.py
|
Python
|
gpl-3.0
| 374
| 0.005348
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Answered by Billy Wilson Arante
# Last updated on 2016/12/31 EST
# We defined:
stooges = ['Moe','Larry','Curly']
# but in some Stooges films, Curly was
# replaced by Shemp.
# Write one line of code that changes
# the value of stooges to be:
# ['Moe','Larry','Shemp']
# but does not create a new List
# object.
stooges[2] = "Shemp"
print stooges
|
johankaito/fufuka
|
microblog/venv/lib/python2.7/site-packages/kazoo/tests/test_counter.py
|
Python
|
apache-2.0
| 883
| 0
|
import uuid
from nose.tools import eq_
from kazoo.testing import KazooTestCase
class KazooCounterTests(KazooTestCase):
def _makeOne(self, **kw):
path = "/" + uuid.uuid4().hex
        return self.client.Counter(path, **kw)
def test_int_counter(self):
counter = self._makeOne()
eq_(counter.value, 0)
counter += 2
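        # note: Counter.__add__ writes the change to ZooKeeper and returns
        # the counter, so a bare `counter + 1` also increments the stored
        # value (the assertion below relies on this)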
counter + 1
eq_(counter.value, 3)
counter -= 3
counter - 1
eq_(counter.value, -1)
def test_float_counter(self):
counter = self._makeOne(default=0.0)
eq_(counter.value, 0.0)
counter += 2.1
eq_(counter.value, 2.1)
counter -= 3.1
eq_(counter.value, -1.0)
def test_errors(self):
counter = self._makeOne()
self.assertRaises(TypeError, counter.__add__, 2.1)
self.assertRaises(TypeError, counter.__add__, b"a")
|
jbarriosc/ACSUFRO
|
LGPL/CommonSoftware/acscourse/ws/src/ACSCOURSE_MOUNTImpl/Mount1.py
|
Python
|
lgpl-2.1
| 2,155
| 0.015313
|
#--CORBA STUBS-----------------------------------------------------------------
import ACSCOURSE_MOUNT__POA
#--ACS Imports-----------------------------------------------------------------
from Acspy.Servants.ContainerServices import ContainerServices
from Acspy.Servants.ComponentLifecycle import ComponentLifecycle
from Acspy.Servants.ACSComponent import ACSComponent
#--ACS Error System Imports----------------------------------------------------
import ACSErrTypeACSCourseImpl
class Mount1(ACSCOURSE_MOUNT__POA.Mount1, #CORBA stubs for IDL interface
ACSComponent, #Base IDL interface
ContainerServices, #Developer niceties
ComponentLifecycle): #HLA stuff
'''
Simple component implementation provided as a reference for developers.
'''
def __init__(self):
'''
Just call superclass constructors here.
'''
ACSComponent.__init__(self)
ContainerServices.__init__(self)
return
#------------------------------------------------------------------------------
#--Implementation of IDL methods-----------------------------------------------
#------------------------------------------------------------------------------
def objfix(self, az, el):
'''
Python implementation of IDL method.
'''
if el<=90:
self.getLogger().logInfo("objfix called with az="+str(az)+" and el="+str(el))
else:
self.getLogger().logCritical("Wrong value for el "+str(el))
raise ACSErrTypeACSCourseImpl.TargetOutOfRangeExImpl()
    #------------------------------------------------------------------------------
#--Main defined only for generic testing---------------------------------------
#------------------------------------------------------------------------------
if __name__ == "__main__":
import ACSErrTypeACSCourse
print "Creating an object"
g = Mount1()
try:
g.objfix(10,90)
except ACSErrTypeACSCourse.TargetOutOfRangeEx, e:
        h = ACSErrTypeACSCourseImpl.TargetOutOfRangeExImpl(exception=e, create=0)
h.Print()
print "Done..."
|
sbobovyc/LabNotes
|
bk_precision_8500/profile_solarcell.py
|
Python
|
gpl-3.0
| 4,756
| 0.004626
|
'''
Open Source Initiative OSI - The MIT License:Licensing
Tue, 2006-10-31 04:56 nelson
The MIT License
Copyright (c) 2009 BK Precision
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This script talks to the DC load in two ways:
1. Using a DCLoad object (you'd use this method when you write a
python application that talks to the DC load.
2. Using the COM interface. This shows how python code uses the
COM interface. Other programming environments (e.g., Visual
Basic and Visual C++) would use very similar techniques to
talk to the DC load via COM.
Note that the DCLoad object and the COM server interface functions
always return strings.
$RCSfile: client.py $
$Revision: 1.0 $
$Date: 2008/05/16 21:02:50 $
$Author: Don Peterson $
'''
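# Example invocations (a sketch; port number and baud rate depend on your
# setup, see Usage() below):
#   python profile_solarcell.py obj 4 38400
#   python profile_solarcell.py com 4 38400   # COM server must be registered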
import sys, dcload
import time
try:
from win32com.client import Dispatch
except:
pass
err = sys.stderr.write
def TalkToLoad(load, port, baudrate):
'''load is either a COM object or a DCLoad object. They have the
same interface, so this code works with either.
port is the COM port on your PC that is connected to the DC load.
baudrate is a supported baud rate of the DC load.
'''
def test(cmd, results):
if results:
print cmd, "failed:"
print " ", results
exit(1)
else:
print cmd
load.Initialize(port, baudrate) # Open a serial connection
print "Time from DC Load =", load.TimeNow()
test("Set to remote control", load.SetRemoteControl())
test("Set max current to 1 A", load.SetMaxCurrent(1))
test("Set CC current to 0.0 A", load.SetCCCurrent(0.0))
print "Settings:"
print " Mode =", load.GetMode()
print " Max voltage =", load.GetMaxVoltage()
print " Max current =", load.GetMaxCurrent()
print " Max power =", load.GetMaxPower()
print " CC current =", load.GetCCCurrent()
print " CV voltage =", load.GetCVVoltage()
print " CW power =", load.GetCWPower()
print " CR resistance =", load.GetCRResistance()
print " Load on timer time =", load.GetLoadOnTimer()
print " Load on timer state =", load.GetLoadOnTimerState()
print " Trigger source =", load.GetTriggerSource()
print " Function =", load.GetFunction()
print
f = open("output.txt", 'w')
f.write("V\tA\tW\n")
test("Turn on load", load.TurnLoadOn())
i = 0.0
while i < 0.21:
test("Set CC current to %f A" % i, load.SetCCCurrent(i))
i += 0.005
time.sleep(0.2)
values = load.GetInputValues()
for value in values.split("\t"):
print " ", value
f.write(value.split(" ")[0])
f.write('\t')
f.write("\n")
f.close()
test("Turn off load", load.TurnLoadOff())
test("Set to local control", load.SetLocalControl())
def Usage():
name = sys.argv[0]
msg = '''Usage: %(name)s {com|obj} port baudrate
Demonstration python script to talk to a B&K DC load either via the COM
(component object model) interface or via a DCLoad object (in dcload.py).
port is the COM port number on your PC that the load is connected to.
baudrate is the baud rate setting of the DC load.
''' % locals()
print msg
exit(1)
def main():
if len(sys.argv) != 4:
Usage()
access_type = sys.argv[1]
port = int(sys.argv[2])
baudrate = int(sys.argv[3])
if access_type == "com":
load = Dispatch('BKServers.DCLoad85xx')
elif access_type == "obj":
load = dcload.DCLoad()
else:
Usage()
TalkToLoad(load, port, baudrate)
return 0
main()
|
yrchen/CommonRepo
|
commonrepo/infor_api/serializers.py
|
Python
|
apache-2.0
| 842
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
from __future__ import absolute_import, unicode_literals
from rest_framework import serializers
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/c/cp2k.py
|
Python
|
gpl-2.0
| 35,231
| 0.003293
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing CP2K, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Luca Marsella (CSCS)
@author: Damian Alvarez (Forschungszentrum Juelich GmbH)
@author: Alan O'Cais (Forschungszentrum Juelich GmbH)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import fileinput
import glob
import re
import os
import shutil
import sys
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import write_file
from easybuild.tools.config import build_option
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_avail_core_count
from easybuild.tools.toolchain.compiler import OPTARCH_GENERIC
# CP2K needs this version of libxc
LIBXC_MIN_VERSION = '2.0.1'
class EB_CP2K(EasyBlock):
"""
Support for building CP2K
- prepare module include files if required
- generate custom config file in 'arch' directory
- build CP2K
- run regression test if desired
- install by copying binary executables
"""
def __init__(self, *args, **kwargs):
super(EB_CP2K, self).__init__(*args, **kwargs)
self.typearch = None
# this should be set to False for old versions of GCC (e.g. v4.1)
self.compilerISO_C_BINDING = True
# compiler options that need to be set in Makefile
self.debug = ''
self.fpic = ''
# used for both libsmm and libxsmm
self.libsmm = ''
self.modincpath = ''
self.openmp = ''
self.make_instructions = ''
@staticmethod
def extra_options():
extra_vars = {
'type': ['popt', "Type of build ('popt' or 'psmp')", CUSTOM],
'typeopt': [True, "Enable optimization", CUSTOM],
'modincprefix': ['', "IMKL prefix for modinc include dir", CUSTOM],
'modinc': [[], ("List of modinc's to use (*.f90], or 'True' to use "
"all found at given prefix"), CUSTOM],
'extracflags': ['', "Extra CFLAGS to be added", CUSTOM],
'extradflags': ['', "Extra DFLAGS to be added", CUSTOM],
'ignore_regtest_fails': [False, ("Ignore failures in regression test "
"(should be used with care)"), CUSTOM],
'maxtasks': [4, ("Maximum number of CP2K instances run at "
"the same time during testing"), CUSTOM],
'runtest': [True, "Build and run CP2K tests", CUSTOM],
'plumed': [None, "Enable PLUMED support", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def _generate_makefile(self, options):
"""Generate Makefile based on options dictionary and optional make instructions"""
text = "# Makefile generated by CP2K easyblock in EasyBuild\n"
for key, value in sorted(options.iteritems()):
text += "%s = %s\n" % (key, value)
return text + self.make_instructions
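    # e.g. options = {'CC': 'mpicc', 'FC': 'mpif90'} (illustrative) renders as:
    #   # Makefile generated by CP2K easyblock in EasyBuild
    #   CC = mpicc
    #   FC = mpif90
    # followed by any extra make_instructions.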
def configure_step(self):
"""Configure build
- build Libint wrapper
- generate Makefile
"""
known_types = ['popt', 'psmp']
if self.cfg['type'] not in known_types:
raise EasyBuildError("Unknown build type specified: '%s', known types are %s",
self.cfg['type'], known_types)
# correct start dir, if needed
# recent CP2K versions have a 'cp2k' dir in the unpacked 'cp2k' dir
cp2k_path = os.path.join(self.cfg['start_dir'], 'cp2k')
if os.path.exists(cp2k_path):
self.cfg['start_dir'] = cp2k_path
self.log.info("Corrected start_dir to %s" % self.cfg['start_dir'])
# set compilers options according to toolchain config
# full debug: -g -traceback -check all -fp-stack-check
# -g links to mpi debug libs
if self.toolchain.options['debug']:
self.debug = '-g'
self.log.info("Debug build")
if self.toolchain.options['pic']:
self.fpic = "-fPIC"
self.log.info("Using fPIC")
# report on extra flags being used
if self.cfg['extracflags']:
self.log.info("Using extra CFLAGS: %s" % self.cfg['extracflags'])
if self.cfg['extradflags']:
self.log.info("Using extra CFLAGS: %s" % self.cfg['extradflags'])
# lib(x)smm support
libsmm = get_software_root('libsmm')
libxsmm = get_software_root('libxsmm')
if libxsmm:
self.cfg.update('extradflags', '-D__LIBXSMM')
self.libsmm = '-lxsmm -lxsmmf'
self.log.debug('Using libxsmm %s' % libxsmm)
elif libsmm:
libsmms = glob.glob(os.path.join(libsmm, 'lib', 'libsmm_*nn.a'))
dfs = [os.path.basename(os.path.splitext(x)[0]).replace('lib', '-D__HAS_') for x in libsmms]
moredflags = ' ' + ' '.join(dfs)
self.cfg.update('extradflags', moredflags)
self.libsmm = ' '.join(libsmms)
self.log.debug('Using libsmm %s (extradflags %s)' % (self.libsmm, moredflags))
# obtain list of modinc's to use
if self.cfg["modinc"]:
self.modincpath = self.prepmodinc()
# set typearch
self.typearch = "Linux-x86-64-%s" % self.toolchain.name
# extra make instructions
self.make_instructions = '' # "graphcon.o: graphcon.F\n\t$(FC) -c $(FCFLAGS2) $<\n"
# compiler toolchain specific configuration
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.INTELCOMP:
options = self.configure_intel_based()
elif comp_fam == toolchain.GCC:
options = self.configure_GCC_based()
else:
raise EasyBuildError("Don't know how to tweak configuration for compiler family %s" % comp_fam)
# BLAS/FFTW
if get_software_root('IMKL'):
options = self.configure_MKL(options)
else:
# BLAS
if get_software_root('ACML'):
options = self.configure_ACML(options)
else:
options = self.configure_BLAS_lib(options)
# FFTW (no MKL involved)
if 'fftw3' in os.getenv('LIBFFT', ''):
options = self.configure_FFTW3(options)
# LAPACK
if os.getenv('LIBLAPACK_MT', None) is not None:
options = self.configure_LAPACK(options)
if os.getenv('LIBSCALAPACK', None) is not None:
options = self.configure_ScaLAPACK(options)
# PLUMED
plumed = get_software_root('PLUMED')
if self.cfg['plumed'] and not
|
niklasf/python-prompt-toolkit
|
prompt_toolkit/contrib/regular_languages/lexer.py
|
Python
|
bsd-3-clause
| 2,621
| 0.000763
|
"""
`GrammarLexer` is compatible with Pygments lexers and can be used to highlight
the input using a regular grammar with token annotations.
"""
from __future__ import unicode_literals
from pygments.token import Token
from prompt_toolkit.layout.lexers import Lexer
from .compiler import _CompiledGrammar
__all__ = (
'GrammarLexer',
)
class GrammarLexer(Lexer):
"""
Lexer which can be used for highlighting of tokens according to variables in the grammar.
    (It does not do actual lexing of the string, but it exposes an API compatible
    with the Pygments lexer class.)
:param compiled_grammar: Grammar as returned by the `compile()` function.
:param lexers: Dictionary mapping variable names of the regular grammar to
                   the lexers that should be used for this part. (This can
call other lexers recursively.) If you wish a part of the
grammar to just get one token, use a
`prompt_toolkit.layout.lexers.SimpleLexer`.
"""
def __init__(self, compiled_grammar, default_token=None, lexers=None):
assert isinstance(compiled_grammar, _CompiledGrammar)
        assert default_token is None or isinstance(default_token, tuple)
assert lexers is None or all(isinstance(v, Lexer) for k, v in lexers.items())
assert lexers is None or isinstance(lexers, dict)
self.compiled_grammar = compiled_grammar
self.default_token = default_token or Token
self.lexers = lexers or {}
def get_tokens(self, cli, text):
m = self.compiled_grammar.match_prefix(text)
if m:
characters = [[self.default_token, c] for c in text]
for v in m.variables():
# If we have a `Lexer` instance for this part of the input.
# Tokenize recursively and apply tokens.
lexer = self.lexers.get(v.varname)
if lexer:
lexer_tokens = lexer.get_tokens(cli, text[v.start:v.stop])
i = v.start
for t, s in lexer_tokens:
for c in s:
if characters[i][0] == self.default_token:
characters[i][0] = t
i += 1
# Highlight trailing input.
trailing_input = m.trailing_input()
if trailing_input:
for i in range(trailing_input.start, trailing_input.stop):
characters[i][0] = Token.TrailingInput
return characters
else:
return [(Token, text)]
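# A minimal usage sketch (grammar string and tokens are illustrative; compile()
# lives in the sibling .compiler module, SimpleLexer next to Lexer above):
#
#   from pygments.token import Token
#   from prompt_toolkit.contrib.regular_languages.compiler import compile
#   from prompt_toolkit.layout.lexers import SimpleLexer
#
#   g = compile(r'(?P<cmd>[a-z]+)\s+(?P<arg>[0-9]+)')
#   lexer = GrammarLexer(g, lexers={'cmd': SimpleLexer(Token.Keyword),
#                                   'arg': SimpleLexer(Token.Number)})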
|
teddywing/pubnub-python
|
python/examples/history.py
|
Python
|
mit
| 1,293
| 0.007734
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from pubnub import Pubnub
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or ''
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
                secret_key=secret_key,
                cipher_key=cipher_key, ssl_on=ssl_on)
channel = 'a'
# Synchronous usage
print pubnub.history(channel, count=2)
# Asynchronous usage
def callback(message):
print(message)
pubnub.history(channel, count=2, callback=callback, error=callback)
# Synchronous usage
print pubnub.history(channel, count=2, include_token=True)
# Asynchronous usage
def callback(message):
print(message)
pubnub.history(channel, count=2, include_token=True,
               callback=callback, error=callback)
|
benmiroglio/pymatch
|
pymatch/Matcher.py
|
Python
|
mit
| 21,944
| 0.003418
|
from __future__ import print_function
from pymatch import *
import pymatch.functions as uf
class Matcher:
"""
Matcher Class -- Match data for an observational study.
Parameters
----------
test : pd.DataFrame
Data representing the test group
control : (pd.DataFrame)
Data representing the control group
formula : str (optional)
custom formula to use for logistic regression
i.e. "Y ~ x1 + x2 + ..."
yvar : str (optional)
Name of dependent variable (the treatment)
exclude : list (optional)
List of variables to ignore in regression/matching.
        Useful for unique identifiers
"""
def __init__(self, test, control, yvar, formula=None, exclude=[]):
# configure plots for ipynb
plt.rcParams["figure.figsize"] = (10, 5)
# variables generated during matching
aux_match = ['scores', 'match_id', 'weight', 'record_id']
# assign unique indices to test and control
t, c = [i.copy().reset_index(drop=True) for i in (test, control)]
t = t.dropna(axis=1, how="all")
c = c.dropna(axis=1, how="all")
c.index += len(t)
self.data = t.dropna(axis=1, how='all').append(c.dropna(axis=1, how='all'), sort=True)
self.control_color = "#1F77
|
B4"
self.test_color = "#FF7F0E"
self.yvar = yvar
self.exclude = exclude + [self.yvar] + aux_match
self.formula = formula
self.nmodels = 1 # for now
self.models = []
self.swdata = None
self.model_accuracy = []
self.data[yvar] = self.data[yvar].astype(int) # should be binary 0, 1
        self.xvars = [i for i in self.data.columns if i not in self.exclude and i != yvar]
self.data = self.data.dropna(subset=self.xvars)
self.matched_data = []
self.xvars_escaped = [ "Q('{}')".format(x) for x in self.xvars]
self.yvar_escaped = "Q('{}')".format(self.yvar)
self.y, self.X = patsy.dmatrices('{} ~ {}'.format(self.yvar_escaped, '+'.join(self.xvars_escaped)),
data=self.data, return_type='dataframe')
self.xvars = [i for i in self.data.columns if i not in self.exclude]
        self.test = self.data[self.data[yvar] == True]
self.control = self.data[self.data[yvar] == False]
self.testn = len(self.test)
self.controln = len(self.control)
self.minority, self.majority = [i[1] for i in sorted(zip([self.testn, self.controln],
[1, 0]),
key=lambda x: x[0])]
print('Formula:\n{} ~ {}'.format(yvar, '+'.join(self.xvars)))
print('n majority:', len(self.data[self.data[yvar] == self.majority]))
print('n minority:', len(self.data[self.data[yvar] == self.minority]))
def fit_scores(self, balance=True, nmodels=None):
"""
Fits logistic regression model(s) used for
generating propensity scores
Parameters
----------
balance : bool
Should balanced datasets be used?
(n_control == n_test)
nmodels : int
How many models should be fit?
Score becomes the average of the <nmodels> models if nmodels > 1
Returns
-------
None
"""
# reset models if refitting
if len(self.models) > 0:
self.models = []
if len(self.model_accuracy) > 0:
self.model_accuracy = []
if not self.formula:
# use all columns in the model
self.xvars_escaped = [ "Q('{}')".format(x) for x in self.xvars]
self.yvar_escaped = "Q('{}')".format(self.yvar)
self.formula = '{} ~ {}'.format(self.yvar_escaped, '+'.join(self.xvars_escaped))
if balance:
if nmodels is None:
# fit multiple models based on imbalance severity (rounded up to nearest tenth)
minor, major = [self.data[self.data[self.yvar] == i] for i in (self.minority,
self.majority)]
nmodels = int(np.ceil((len(major) / len(minor)) / 10) * 10)
self.nmodels = nmodels
i = 0
errors = 0
while i < nmodels and errors < 5:
uf.progress(i+1, nmodels, prestr="Fitting Models on Balanced Samples")
            # sample from majority to create balanced dataset
df = self.balanced_sample()
df = pd.concat([uf.drop_static_cols(df[df[self.yvar] == 1], yvar=self.yvar),
uf.drop_static_cols(df[df[self.yvar] == 0], yvar=self.yvar)],
sort=True)
y_samp, X_samp = patsy.dmatrices(self.formula, data=df, return_type='dataframe')
X_samp.drop(self.yvar, axis=1, errors='ignore', inplace=True)
glm = GLM(y_samp, X_samp, family=sm.families.Binomial())
try:
res = glm.fit()
self.model_accuracy.append(self._scores_to_accuracy(res, X_samp, y_samp))
self.models.append(res)
i = i + 1
except Exception as e:
errors = errors + 1 # to avoid infinite loop for misspecified matrix
print('Error: {}'.format(e))
print("\nAverage Accuracy:", "{}%".
format(round(np.mean(self.model_accuracy) * 100, 2)))
else:
# ignore any imbalance and fit one model
print('Fitting 1 (Unbalanced) Model...')
glm = GLM(self.y, self.X, family=sm.families.Binomial())
res = glm.fit()
self.model_accuracy.append(self._scores_to_accuracy(res, self.X, self.y))
self.models.append(res)
print("\nAccuracy", round(np.mean(self.model_accuracy[0]) * 100, 2))
def predict_scores(self):
"""
Predict Propensity scores for each observation.
Adds a "scores" columns to self.data
Returns
-------
None
"""
scores = np.zeros(len(self.X))
for i in range(self.nmodels):
m = self.models[i]
scores += m.predict(self.X[m.params.index])
self.data['scores'] = scores/self.nmodels
def match(self, threshold=0.001, nmatches=1, method='min', max_rand=10):
"""
Finds suitable match(es) for each record in the minority
        dataset, if one exists. Records are excluded from the final
matched dataset if there are no suitable matches.
self.matched_data contains the matched dataset once this
method is called
Parameters
----------
threshold : float
            threshold for fuzzy matching,
            i.e. a match requires |score_x - score_y| <= threshold
        nmatches : int
            How many majority profiles should be matched
(at most) to minority profiles
method : str
Strategy for when multiple majority profiles
are suitable matches for a single minority profile
"random" - choose randomly (fast, good for testing)
"min" - choose the profile with the closest score
max_rand : int
max number of profiles to consider when using random tie-breaks
Returns
-------
None
"""
if 'scores' not in self.data.columns:
print("Propensity Scores have not been calculated. Using defaults...")
self.fit_scores()
self.predict_scores()
test_scores = self.data[self.data[self.yvar]==True][['scores']]
ctrl_scores = self.data[self.data[self.yvar]==False][['scores']]
result, match_ids = [], []
for i in range(len(test_scores)):
# uf.progress(i+1, len(test_scores), 'Matching Control to Test...')
match_id = i
score = test_scores.iloc[i]
if method == 'random':
bool_match = abs(ctrl_scores - score) <= threshold
|
jmmL/misc
|
eights.py
|
Python
|
mit
| 931
| 0.022556
|
def main():
"""A simulation-ish of Summer Eights bumps racing"""
import random
course_length = 1000.0
bung_line_separation = 20
number_of_bung_lines = 12
class Boat:
        def __init__(self, name, speed, bung_line):
            self.name = name
self.speed = speed
self.bung_line = bung_line
def time_to_complete_course(boat):
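        # 5% chance per call of a mishap that permanently cuts this
        # boat's speed by 20% (the Boat object is mutated)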
if (random.random() > 0.95):
boat.speed *= 0.8
return ((course_length - ((number_of_bung_lines - boat.bung_line)
* bung_line_separation)) / boat.speed)
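    # Worked example: Univ starts on bung line 3, i.e. (12 - 3) * 20 = 180 m
    # up the 1000 m course, so at 20 m/s it needs (1000 - 180) / 20 = 41 s
    # (before any random slowdown).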
univ = Boat("Univ",20.0,3)
balliol = Boat("Balliol",16.0,1)
boats = [univ, balliol]
#print(time_to_complete_course(univ_boat))
    if (time_to_complete_course(univ) < time_to_complete_course(balliol)):
print(univ.name + " won!")
else:
print(balliol.name + " won!")
main()
|
UpSea/thirdParty
|
pyqtgraph-0.9.10/examples/relativity/relativity.py
|
Python
|
mit
| 28,282
| 0.013896
|
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.parametertree import Parameter, ParameterTree
from pyqtgraph.parametertree import types as pTypes
import pyqtgraph.configfile
import numpy as np
import user
import collections
import sys, os
class RelativityGUI(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.animations = []
self.animTimer = QtCore.QTimer()
self.animTimer.timeout.connect(self.stepAnimation)
self.animTime = 0
self.animDt = .016
self.lastAnimTime = 0
self.setupGUI()
self.objectGroup = ObjectGroupParam()
self.params = Parameter.create(name='params', type='group', children=[
dict(name='Load Preset..', type='list', values=[]),
#dict(name='Unit System', type='list', values=['', 'MKS']),
dict(name='Duration', type='float', value=10.0, step=0.1, limits=[0.1, None]),
dict(name='Reference Frame', type='list', values=[]),
dict(name='Animate', type='bool', value=True),
dict(name='Animation Speed', type='float', value=1.0, dec=True, step=0.1, limits=[0.0001, None]),
dict(name='Recalculate Worldlines', type='action'),
dict(name='Save', type='action'),
dict(name='Load', type='action'),
self.objectGroup,
])
self.tree.setParameters(self.params, showTop=False)
self.params.param('Recalculate Worldlines').sigActivated.connect(self.recalculate)
self.params.param('Save').sigActivated.connect(self.save)
self.params.param('Load').sigActivated.connect(self.load)
self.params.param('Load Preset..').sigValueChanged.connect(self.loadPreset)
self.params.sigTreeStateChanged.connect(self.treeChanged)
## read list of preset configs
presetDir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'presets')
if os.path.exists(presetDir):
presets = [os.path.splitext(p)[0] for p in os.listdir(presetDir)]
self.params.param('Load Preset..').setLimits(['']+presets)
def setupGUI(self):
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.splitter = QtGui.QSplitter()
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.layout.addWidget(self.splitter)
self.tree = ParameterTree(showHeader=False)
self.splitter.addWidget(self.tree)
self.splitter2 = QtGui.QSplitter()
self.splitter2.setOrientation(QtCore.Qt.Vertical)
self.splitter.addWidget(self.splitter2)
self.worldlinePlots = pg.GraphicsLayoutWidget()
self.splitter2.addWidget(self.worldlinePlots)
self.animationPlots = pg.GraphicsLayoutWidget()
self.splitter2.addWidget(self.animationPlots)
self.splitter2.setSizes([int(self.height()*0.8), int(self.height()*0.2)])
self.inertWorldlinePlot = self.worldlinePlots.addPlot()
self.refWorldlinePlot = self.worldlinePlots.addPlot()
self.inertAnimationPlot = self.animationPlots.addPlot()
self.inertAnimationPlot.setAspectLocked(1)
self.refAnimationPlot = self.animationPlots.addPlot()
self.refAnimationPlot.setAspectLocked(1)
self.inertAnimationPlot.setXLink(self.inertWorldlinePlot)
self.refAnimationPlot.setXLink(self.refWorldlinePlot)
def recalculate(self):
## build 2 sets of clocks
clocks1 = collections.OrderedDict()
clocks2 = collections.OrderedDict()
for cl in self.params.param('Objects'):
clocks1.update(cl.buildClocks())
clocks2.update(cl.buildClocks())
## Inertial simulation
dt = self.animDt * self.params['Animation Speed']
sim1 = Simulation(clocks1, ref=None, duration=self.params['Duration'], dt=dt)
sim1.run()
        sim1.plot(self.inertWorldlinePlot)
self.inertWorldlinePlot.autoRange(padding=0.1)
## reference simulation
ref = self.params['Reference Frame']
dur = clocks1[ref].refData['pt'][-1] ## decide how long to run the reference simulation
sim2 = Simulation(clocks2, ref=clocks2[ref], duration=dur, dt=dt)
sim2.run()
sim2.plot(self.refWorldlinePlot)
self.refWorldlinePlot.autoRange(padding=0.1)
## create animations
self.refAnimationPlot.clear()
self.inertAnimationPlot.clear()
self.animTime = 0
self.animations = [Animation(sim1), Animation(sim2)]
self.inertAnimationPlot.addItem(self.animations[0])
self.refAnimationPlot.addItem(self.animations[1])
## create lines representing all that is visible to a particular reference
#self.inertSpaceline = Spaceline(sim1, ref)
#self.refSpaceline = Spaceline(sim2)
self.inertWorldlinePlot.addItem(self.animations[0].items[ref].spaceline())
self.refWorldlinePlot.addItem(self.animations[1].items[ref].spaceline())
def setAnimation(self, a):
if a:
self.lastAnimTime = pg.ptime.time()
self.animTimer.start(self.animDt*1000)
else:
self.animTimer.stop()
def stepAnimation(self):
now = pg.ptime.time()
dt = (now-self.lastAnimTime) * self.params['Animation Speed']
self.lastAnimTime = now
self.animTime += dt
if self.animTime > self.params['Duration']:
self.animTime = 0
for a in self.animations:
a.restart()
for a in self.animations:
a.stepTo(self.animTime)
def treeChanged(self, *args):
clocks = []
for c in self.params.param('Objects'):
clocks.extend(c.clockNames())
#for param, change, data in args[1]:
#if change == 'childAdded':
self.params.param('Reference Frame').setLimits(clocks)
self.setAnimation(self.params['Animate'])
def save(self):
fn = str(pg.QtGui.QFileDialog.getSaveFileName(self, "Save State..", "untitled.cfg", "Config Files (*.cfg)"))
if fn == '':
return
state = self.params.saveState()
pg.configfile.writeConfigFile(state, fn)
def load(self):
fn = str(pg.QtGui.QFileDialog.getOpenFileName(self, "Save State..", "", "Config Files (*.cfg)"))
if fn == '':
return
state = pg.configfile.readConfigFile(fn)
self.loadState(state)
def loadPreset(self, param, preset):
if preset == '':
return
path = os.path.abspath(os.path.dirname(__file__))
fn = os.path.join(path, 'presets', preset+".cfg")
state = pg.configfile.readConfigFile(fn)
self.loadState(state)
def loadState(self, state):
if 'Load Preset..' in state['children']:
del state['children']['Load Preset..']['limits']
del state['children']['Load Preset..']['value']
self.params.param('Objects').clearChildren()
self.params.restoreState(state, removeChildren=False)
self.recalculate()
class ObjectGroupParam(pTypes.GroupParameter):
def __init__(self):
pTypes.GroupParameter.__init__(self, name="Objects", addText="Add New..", addList=['Clock', 'Grid'])
def addNew(self, typ):
if typ == 'Clock':
self.addChild(ClockParam())
elif typ == 'Grid':
self.addChild(GridParam())
class ClockParam(pTypes.GroupParameter):
def __init__(self, **kwds):
defs = dict(name="Clock", autoIncrementName=True, renamable=True, removable=True, children=[
dict(name='Initial Position', type='float', value=0.0, step=0.1),
#dict(name='V0', type='float', value=0.0, step=0.1),
AccelerationGroup(),
|
bittner/django-media-tree
|
demo_project/demo_project/urls.py
|
Python
|
bsd-3-clause
| 1,273
| 0.007855
|
from media_tree.models import FileNode
from media_tree.contrib.views.listing import FileNodeListingView
from media_tree.contrib.views.detail import FileNodeDetailView
from media_tree.contrib.views.detail.image import ImageNodeDetailView
from django.views.generic.base import TemplateView
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^$', TemplateView.as_view(
template_name="media_tree/base.html"
)),
url(r'^listing/$', FileNodeListingView.as_view(
        # notice that queryset can be any iterable, for instance a list:
queryset=FileNode.objects.filter(level=0),
), name="demo_listing"),
url(r'^files/(?P<path>.+)/$', FileNodeDetailView.as_view(
queryset=FileNode.objects.filter(extension='txt')
), name="demo_detail"),
url(r'^images/(?P<path>.+)/$', ImageNodeDetailView.as_view(
queryset=FileNode.objects.get(path='Example Pictures').get_descendants()
), name="demo_image"),
url(r'^admin/', include(admin.site.urls)),
)
# do NOT use this on a production server
from django.conf import settings
from django.conf.urls.static import static
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
soylentdeen/Graffity
|
src/BCI/IRS16NM.py
|
Python
|
mit
| 4,007
| 0.010482
|
import sys
sys.path.append('../')
import Graffity
import numpy
import PlotTools
import glob
import astropy.io.fits as pyfits
from scipy import optimize
from os import walk
from os.path import join
def fitGaussian(x, y):
errfunc = lambda p, x, y: numpy.abs(p[0]*numpy.exp(-(x**2.0)/(2*p[1]**2.0)) - y)
coeffs = [300000.0, 25.0]
pfit, success = optimize.leastsq(errfunc, coeffs, args=(x,y))
return pfit
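# fitGaussian fits the zero-centred model A * exp(-x**2 / (2 * sigma**2))
# by least squares and returns [A, sigma]; the starting guess is an
# amplitude of 3e5 counts and a width of 25 (mas, per the plots below).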
figs, axes = PlotTools.configurePlots(4)
observationFiles = numpy.array([])
startdir = '/gvstore1/forFrank/'
for root, dirs, files in walk(startdir):
if 'reduced_20180307' in root:
for f in files:
if 'dualscip2vmred.fits' in f:
observationFiles = numpy.append(observationFiles, join(root, f))
color = {0:'b', 1:'g', 2:'r', 3:'y'}
offsets = {}
offsets[0] = numpy.array([])
offsets[1] = numpy.array([])
offsets[2] = numpy.array([])
offsets[3] = numpy.array([])
strehls = {}
strehls[0] = numpy.array([])
strehls[1] = numpy.array([])
strehls[2] = numpy.array([])
strehls[3] = numpy.array([])
flux = {}
flux[0] = numpy.array([])
flux[1] = numpy.array([])
flux[2] = numpy.array([])
flux[3] = numpy.array([])
for f in observationFiles:
SObjName = pyfits.getheader(f).get("ESO INS SOBJ NAME")
if SObjName == 'IRS16NW':
Grav = Graffity.GRAVITY_Dual_Sci_P2VM(fileBase=f[:-20], processAcqCamData=True)
print "Good - %s" %f
        for i in Grav.AcqCamDat.newStrehl.data.keys():
FiberOffset = (Grav.MET_SOBJ_DRA[i]**2.0 + Grav.MET_SOBJ_DDEC[i]**2.0)**0.5
offsets[i] = numpy.append(offsets[i], FiberOffset)
strehls[i] = numpy.append(strehls[i], Grav.Strehl[i])
flux[i] = numpy.append(flux[i], numpy.mean(Grav.AcqCamDat.TOTALFLUX_SC.data[i]))
axes[2].scatter([FiberOffset],[flux[i][-1]],
color=color[i], s=2*2**(10*strehls[i][-1]))
axes[3].scatter([strehls[i][-1]], [flux[i][-1]], color=color[i])
del(Grav)
else:
print "Bad"
binSize=0.2
StrehlBins = numpy.arange(0.05, 0.65, binSize)
offsetRange = numpy.linspace(0.0, 50.0)
Amps = {}
sigs = {}
srs = {}
dFib = numpy.linspace(0, 50.0)
for i in color.keys():
Amps[i] = []
sigs[i] = []
srs[i] = []
for s in StrehlBins:
current = (strehls[i] > s) & (strehls[i] < s+binSize)
if numpy.sum(current) > 5:
#axes[0].clear()
#axes[0].scatter(offsets[i][current], flux[i][current], color=color[i])
fit = fitGaussian(offsets[i][current], flux[i][current])
#print fit[1]
#axes[0].plot(offsetRange, fit[0]*numpy.exp(-(offsetRange)**2.0/(2.0*fit[1])**2.0))
#figs[0].show()
#raw_input()
Amps[i].append(fit[0])
sigs[i].append(fit[1])
srs[i].append(s)
if sigs[i][-1] < 70:
axes[2].plot(dFib,
Amps[i][-1]*numpy.exp(-(dFib)**2.0/(2.0*sigs[i][-1])**2.0),
color = color[i], lw=s)
Amps[i] = numpy.array(Amps[i])
sigs[i] = numpy.array(sigs[i])
srs[i] = numpy.array(srs[i])
axes[0].scatter(srs[i], Amps[i]/1000000.0, color=color[i], label="Telescope = %d"%(i+1))
axes[1].scatter(srs[i][sigs[i] < 70], sigs[i][sigs[i] < 70],
color=color[i], label="Telescope = %d"%(i+1))
axes[0].set_xlabel("Strehl Ratio")
axes[0].set_ylabel("Amplitude")
axes[1].set_xlabel("Strehl Ratio")
axes[1].set_ylabel("Sigma (mas)")
axes[2].set_xlabel("Fiber Offset (mas)")
axes[2].set_ylabel("Total Flux (SC)")
axes[3].set_xlabel("Strehl Ratio")
axes[3].set_ylabel("Total Flux (SC)")
figs[0].suptitle("IRS16NW")
figs[1].suptitle("IRS16NW")
figs[2].suptitle("IRS16NW")
figs[3].suptitle("IRS16NW")
axes[0].legend(loc=4)
axes[1].legend(loc=1)
figs[0].show()
figs[0].savefig("Amplitude_V_Strehl.png")
figs[1].show()
figs[1].savefig("Sigma_V_Strehl.png")
figs[2].show()
figs[3].show()
figs[2].savefig("Offset_V_Flux.png")
figs[3].savefig("Strehl_V_Flux.png")
|
haskelladdict/sconcho
|
sconcho/gui/icons_rc.py
|
Python
|
gpl-3.0
| 575,342
| 0.000009
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sat May 11 12:28:54 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x58\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x16\x00\x00\x00\x16\x08\x06\x00\x00\x00\xc4\xb4\x6c\x3b\
\x00\x00\x04\x1f\x49\x44\x41\x54\x78\xda\xa5\x55\xcf\x4f\x54\x67\
\x14\x3d\xef\xbd\x6f\xde\x30\x3f\x5b\xc6\x81\x86\x0a\x08\x84\xa8\
[binary data omitted: two embedded 32x32 PNG icons (Inkscape-generated, IHDR/IDAT/IEND escape bytes), garbled and truncated in extraction]
|
Cabalist/Mycodo
|
3.5/cgi-bin/Test-Sensor-HT-AM2315.py
|
Python
|
gpl-3.0
| 302
| 0
|
#!/usr/bin/python
import time
from tentacle_pi.AM2315 import AM2315
am = AM2315(0x5c, "/dev/i2c-1")
for x in range(10):
temperature, humidity, crc_check = am.sense()
print "Temperat
|
ure: %s" % temperature
print "Humidity: %s" % humidity
print "CRC: %s" % crc_check
time.sleep(2)
|
bdestombe/flopy-1
|
flopy/modflow/mfdrn.py
|
Python
|
bsd-3-clause
| 9,662
| 0.001035
|
"""
mfdrn module. Contains the ModflowDrn class. Note that the user can access
the ModflowDrn class as `flopy.modflow.ModflowDrn`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?drn.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils.util_list import MfList
class ModflowDrn(Package):
"""
MODFLOW Drain Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is None).
stress_period_data : list of boundaries, recarrays, or dictionary of
boundaries.
Each drain cell is defined through definition of
layer(int), row(int), column(int), elevation(float),
conductance(float).
The simplest form is a dictionary with a lists of boundaries for each
stress period, where each list of boundaries itself is a list of
boundaries. Indices of the dictionary are the numbers of the stress
period. This gives the form of::
stress_period_data =
{0: [
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
],
1: [
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
], ...
kper:
[
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
[lay, row, col, stage, cond],
]
}
Note that if no values are specified for a certain stress period, then
the list of boundaries for the previous stress period for which values
were defined is used. Full details of all options to specify
stress_period_data can be found in the flopy3boundaries Notebook in
the basic subdirectory of the examples directory.
dtype : dtype definition
if data type is different from default
options : list of strings
Package options. (default is None).
extension : string
Filename extension (default is 'drn')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
        the model name and .cbc extension (for example, modflowtest.cbc),
        if ipakcb is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output names will be
        created using the model name and .cbc extension, if ipakcb is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow()
>>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all
>>> #stress periods
>>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec)
"""
def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None,
extension='drn', unitnumber=None, options=None,
filenames=None, **kwargs):
        # set default unit number if one is not specified
if unitnumber is None:
unitnumber = ModflowDrn.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowDrn.ftype())
else:
ipakcb = 0
# Fill namefile items
name = [ModflowDrn.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'drn.htm'
self.ipakcb = ipakcb
self.np = 0
if options is None:
options = []
self.options = options
if dtype is not None:
self.dtype = dtype
else:
self.dtype = self.get_default_dtype(structured=self.parent.structured)
self.stress_period_data = MfList(self, stress_period_data)
self.parent.add_package(self)
@staticmethod
def get_default_dtype(structured=True):
if structured:
dtype = np.dtype([("k", np.int), ("i", np.int),
("j", np.int), ("elev", np.float32),
("cond", np.float32)])
else:
dtype = np.dtype([("node", np.int), ("elev", np.float32),
("cond", np.float32)])
return dtype
def ncells(self):
# Returns the maximum number of cells that have drains (developed for MT3DMS SSM package)
# print 'Function must be implemented properly for drn package'
return self.stress_period_data.mxact
def write_file(self, check=True):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
if check: # allows turning off package checks when writing files at model level
self.check(f='{}.chk'.format(self.name[0]), verbose=self.parent.verbose, level=1)
f_drn = open(self.fn_path, 'w')
f_drn.write('{0}\n'.format(self.heading))
# f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb))
line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, self.ipakcb)
for opt in self.options:
line += ' ' + str(opt)
line += '\n'
f_drn.write(line)
self.stress_period_data.write_transient(f_drn)
f_drn.close()
def add_record(self, kper, index, values):
try:
self.stress_period_data.add_record(kper, index, values)
except Exception as e:
raise Exception("mfdrn error adding record to list: " + str(e))
@staticmethod
def get_empty(ncells=0, aux_names=None, structured=True):
        # get an empty recarray that corresponds to dtype
dtype = ModflowDrn.get_default_dtype(structured=structured)
if aux_names is not None:
dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
d = np.zeros((ncells, len(dtype)), dtype=dtype)
d[:, :] = -1.0E+10
return np.core.records.fromarrays(d.transpose(), dtype=dtype)
    @staticmethod
|
hunch/hunch-gift-app
|
django/conf/locale/es_AR/formats.py
|
Python
|
mit
| 735
| 0.005442
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = r'j N Y'
TIME_FORMAT = r'H:i:s'
DATETIME_FORMAT = r'j N Y H:i:s'
YEAR_MONTH_FORMAT = r'F Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = r'd/m/Y'
SHORT_DATETIME_FORMAT = r'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # 0: Sunday, 1: Monday
DATE_INPUT_FORMATS = (
'%d/%m/%Y', # '31/12/2009'
'%d/%m/%y', # '31/12/09'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
twiest/openshift-tools
|
openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/lib_openshift_api/library/oadm_router.py
|
Python
|
apache-2.0
| 28,872
| 0.003221
|
#!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import shutil
import subprocess
import re
import yaml
# This is here because of a bug that causes yaml
# to incorrectly handle timezone info on timestamps
def timestamp_constructor(_, node):
'''return timestamps as strings'''
return str(node.value)
yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0])
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
        yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
def _replace(self, fname, force=False):
'''return all pods '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create(self, fname):
'''return all pods '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname):
'''return all pods '''
return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
def _get(self, resource, rname=None):
'''return a secret by name '''
cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
'''Base command for oc '''
#cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
cmds = []
if oadm:
cmds = ['/usr/bin/oadm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
proc.wait()
stdout = proc.stdout.read()
stderr = proc.stderr.read()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
print
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype=None):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(data):
'''Turn an array of dict: filename, content into a files array'''
files = []
for sfile in data:
path = Utils.create_file(sfile['path'], sfile['content'])
files.append(path)
return files
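    # Illustrative call (path and content are made up); relies on
    # create_file() above, so the file lands in /tmp:
    #   Utils.create_files_from_contents([{'path': 'cfg.yml', 'content': 'a: 1'}])
    #   -> ['/tmp/cfg.yml']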
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list'
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
|
libstorage/libstoragemgmt
|
tools/smisping/smisping.py
|
Python
|
lgpl-2.1
| 3,066
| 0
|
#!/usr/bin/env python2
# Simple tool to see if we have a SMI-S provider talking on the network and
# if it has any systems we can test.
#
# Can use for scripting as exit value will be:
# 0 if array is online and enumerate systems has some
# 1 if we can talk to provider, but no systems
# 2 Wrong credentials (Wrong username or password)
# 3 Unable to lookup RegisteredName in registered profile (interop support)
# 4 if we cannot talk to provider (network error, connection refused etc.)
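#
# Example (illustrative; URL and credentials are made up):
#   ./smisping.py https://192.168.1.50:5989 admin secret
#   echo $?   # 0 => provider reachable and at least one system found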
from pywbem import Uint16, CIMError
import pywbem
import sys
DEFAULT_NAMESPACE = 'interop'
INTEROP_NAMESPACES = ['interop', 'root/interop', 'root/PG_Interop']
def get_cim_rps(c):
cim_rps = []
for n in INTEROP_NAMESPACES:
try:
cim_rps = c.EnumerateInstances('CIM_RegisteredProfile',
namespace=n,
localonly=False)
except CIMError as e:
if e[0] == pywbem.CIM_ERR_NOT_SUPPORTED or \
e[0] == pywbem.CIM_ERR_INVALID_NAMESPACE or \
e[0] == pywbem.CIM_ERR_INVALID_CLASS:
pass
else:
raise
if len(cim_rps):
for cim_rp in cim_rps:
if cim_rp['RegisteredOrganization'] == Uint16(11) and \
'Array' == cim_rp['RegisteredName']:
return cim_rp
return None
def systems(url, username, password):
# We will support interop so that we don't have to specify namespace
rc = 4
try:
try:
conn = pywbem.WBEMConnection(url, (username, password),
DEFAULT_NAMESPACE,
no_verification=True)
except Exception as ei:
# Some versions of pywbem don't support the parameter
# 'no_verification'
if 'no_verification' in str(ei):
conn = pywbem.WBEMConnection(url, (username, password),
DEFAULT_NAMESPACE)
else:
raise
if conn:
rps = get_cim_rps(conn)
if rps:
cim_systems = conn.Associators(
rps.path,
ResultClass='CIM_ComputerSystem',
AssocClass='CIM_ElementConformsToProfile')
if len(cim_systems):
print('Found %d system(s)' % (len(cim_systems)))
rc = 0
else:
print('No systems found!')
rc = 1
else:
rc = 3
except Exception as e:
if 'Unauthorized' in str(e):
rc = 2
else:
print('Exception: ', str(e))
return rc
if __name__ == '__main__':
if len(sys.argv) != 4:
print("syntax: ./smisping.py <URL> <username> <password>")
print(" eg. smisping.py https://127.0.0.1:5989 <username> <passwd>")
sys.exit(1)
sys.exit(systems(*(sys.argv[1:])))
|
nugget/home-assistant
|
homeassistant/components/vera/cover.py
|
Python
|
apache-2.0
| 2,003
| 0
|
"""Support for Vera cover - curtains, rollershutters etc."""
import logging
from homeassistant.components.cover import CoverDevice, ENTITY_ID_FORMAT, \
ATTR_POSITION
from homeassistant.components.vera import (
VERA_CONTROLLER, VERA_DEVICES, VeraDevice)
DEPENDENCIES = ['vera']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vera covers."""
add_entities(
[VeraCover(device, hass.data[VERA_CONTROLLER]) for
device in hass.data[VERA_DEVICES]['cover']], True)
class VeraCover(VeraDevice, CoverDevice):
"""Representation a Vera Cover."""
def __init__(self, vera_device, controller):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def current_cover_position(self):
"""
Return current position of cover.
0 is closed, 100 is fully open.
"""
position = self.vera_device.get_level()
if position <= 5:
return 0
if position >= 95:
return 100
return position
def set_cover_position(self, **kwargs):
"""Move the cover to a specific po
|
sition."""
self.vera_device.set_level(kwargs.get(ATTR_POSITION))
self.schedule_update_ha_state()
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is not None:
return self.current_cover_position == 0
def open_cover(self, **kwargs):
"""Open the cover."""
self.vera_device.open()
self.schedule_update_ha_state()
def close_cover(self, **kwargs):
"""Close the cover."""
self.vera_device.close()
self.schedule_update_ha_state()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.vera_device.stop()
self.schedule_update_ha_state()
|
DataDog/integrations-extras
|
cyral/tests/conftest.py
|
Python
|
bsd-3-clause
| 479
| 0.004175
|
import os
import mock
import pytest
@pytest.fixture()
def mock_agent_data():
    f_name = os.path.join(os.path.dirname(__file__), 'fixtures', 'agent_metrics.txt')
with open(f_name, 'r') as f:
text_data = f.read()
with mock.patch(
'requests.get',
return_value=mock.MagicMock(
status_code=200, iter_lines=lambda **kwargs: text_data.split("\n"), headers={'Content-Type': "text/plain"}
),
):
yield
|
ausarbluhd/EternalLLC
|
scripts/mallory/src/fuzz.py
|
Python
|
gpl-2.0
| 1,915
| 0.019321
|
from random import randint
overflowstrings = ["A" * 255, "A" * 256, "A" * 257, "A" * 420, "A" * 511, "A" * 512, "A" * 1023, "A" * 1024, "A" * 2047, "A" * 2048, "A" * 4096, "A" * 4097, "A" * 5000, "A" * 10000, "A" * 20000, "A" * 32762, "A" * 32763, "A" * 32764, "A" * 32765, "A" * 32766, "A" * 32767, "A" * 32768, "A" * 65534, "A" * 65535, "A" * 65536, "%x" * 1024, "%n" * 1025 , "%s" * 2048, "%s%n%x%d" * 5000, "%s" * 30000, "%s" * 40000, "%.1024d", "%.2048d", "%.4096d", "%.8200d", "%99999999999s", "%99999999999d", "%99999999999x", "%99999999999n", "%99999999999s" * 1000, "%99999999999d" * 1000, "%99999999999x" * 1000, "%99999999999n" * 1000, "%08x" * 100, "%%20s" * 1000,"%%20x" * 1000,"%%20n" * 1000,"%%20d" * 1000, "%#0123456x%08x%x%s%p%n%d%o%u%c%h%l%q%j%z%Z%t%i%e%g%f%a%C%S%08x%%#01234
|
56x%%x%%s%%p%%n%%d%%o%%u%%c%%h%%l%%q%%j%%z%%Z%%t%%i%%e%%g%%f%%a%%C%%S%%08x"]
def bitflipping(data,mangle_percentage = 7):
l = len(data)
n = int(l*mangle_percentage/100) # 7% of the bytes to be modified
for i in range(0,n): # We change the bytes
r = randint(0,l-1)
        data = data[0:r] + chr(randint(0,255)) + data[r+1:]
return data
def bofinjection(data):
l = len(data)
r = randint(0,len(overflowstrings)-1)
data = data[0:r] + overflowstrings[r] + data[r-l:]
return data
def fuzz(data, bit_flip_percentage = 20, bof_injection_percentage = 20, bit_flip_density = 7):
#print "Fuzz:"
#print " bfp:" + str(bit_flip_percentage)
#print " bip:" + str(bof_injection_percentage)
r = randint(0,100)
#print " first r:" + str(r)
was_fuzzed = False
if r<=bit_flip_percentage:
was_fuzzed = True
data = bitflipping(data, bit_flip_density)
#print " second r:" + str(r)
r = randint(0,100)
if r<=bof_injection_percentage:
was_fuzzed = True
data = bofinjection(data)
return was_fuzzed, data
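# Illustrative demo, not part of the original module: exercises fuzz() on a
# made-up payload; output varies from run to run because mutation is random.
if __name__ == '__main__':
    sample = "USER anonymous\r\nPASS guest\r\n"
    for _ in range(5):
        hit, mutated = fuzz(sample)
        print hit, repr(mutated[:60])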
|
rocky/python3-trepan
|
trepan/processor/cmdbreak.py
|
Python
|
gpl-3.0
| 5,998
| 0.001334
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010, 2013, 2015-2018, 2020 Rocky Bernstein
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect
from pyficache import code_line_info, code_offset_info
from trepan.misc import wrapped_lines, pretty_modfunc_name
from trepan.processor.parse.semantics import build_bp_expr
from trepan.processor.parse.parser import LocationError
from trepan.processor.parse.scanner import ScannerError
from trepan.processor.location import resolve_location
def set_break(
cmd_obj,
func,
filename,
lineno,
condition,
temporary,
args,
force=False,
offset=None,
):
if lineno is None and offset is None:
part1 = "I don't understand '%s' as a line number, offset, or function name," % " ".join(
args[1:]
)
msg = wrapped_lines(
part1, "or file/module plus line number.", cmd_obj.settings["width"]
)
cmd_obj.errmsg(msg)
return False
if filename is None:
filename = cmd_obj.proc.curframe.f_code.co_filename
filename = cmd_obj.core.canonic(filename)
pass
if func is None:
if lineno:
line_info = code_line_info(filename, lineno)
if not line_info:
part1 = "File %s" % cmd_obj.core.filename(filename)
msg = wrapped_lines(
part1,
"is not stoppable at line %d." % lineno,
cmd_obj.settings["width"],
)
cmd_obj.errmsg(msg)
if force:
cmd_obj.msg("Breakpoint set although it may never be reached")
else:
return False
else:
assert offset is not None
lineno = code_offset_info(filename, offset)
if lineno is None:
part1 = "File %s" % cmd_obj.core.filename(filename)
msg = wrapped_lines(
part1,
"has no line associated with offset %d." % offset,
cmd_obj.settings["width"],
)
cmd_obj.errmsg(msg)
return False
pass
bp = cmd_obj.core.bpmgr.add_breakpoint(
filename,
lineno=lineno,
offset=offset,
temporary=temporary,
condition=condition,
func=func)
if func and inspect.isfunction(func):
cmd_obj.msg(
"Breakpoint %d set on calling function %s()" % (bp.number, func.__name__)
)
part1 = "Currently this is line %d of file" % lineno
msg = wrapped_lines(
part1, cmd_obj.core.filename(filename), cmd_obj.settings["width"]
)
cmd_obj.msg(msg)
else:
part1 = ("Breakpoint %d set at line %d of file" %
(bp.number, lineno))
msg = wrapped_lines(
part1, cmd_obj.core.filename(filename), cmd_obj.settings["width"]
)
cmd_obj.msg(msg)
if func:
func_str = " of %s" % pretty_modfunc_name(func)
else:
func_str = ""
if offset is not None and offset >= 0:
cmd_obj.msg("Breakpoint is at offset %d%s "% (offset, func_str))
pass
return True
INVALID_PARSE_BREAK = (None, None, None, None, None)
def parse_break_cmd(proc, args):
if proc.current_command is None:
proc.errmsg("Internal error")
return INVALID_PARSE_BREAK
text = proc.current_command[len(args[0]) + 1 :]
if len(args) > 1 and args[1] == "if":
location = "."
condition = text[text.find("if ") + 3 :]
elif text == "":
location = "."
condition = None
else:
try:
bp_expr = build_bp_expr(text)
except LocationError as e:
proc.errmsg("Error in parsing breakpoint expression at or around:")
proc.errmsg(e.text)
proc.errmsg(e.text_cursor)
return INVALID_PARSE_BREAK
except ScannerError as e:
proc.errmsg("Lexical error in parsing breakpoint expression at or around:")
proc.errmsg(e.text)
proc.errmsg(e.text_cursor)
return INVALID_PARSE_BREAK
location = bp_expr.location
condition = bp_expr.condition
location = resolve_location(proc, location)
if location:
return (
location.method,
location.path,
location.line_number,
condition,
location.offset,
)
else:
return INVALID_PARSE_BREAK
# Demo it
if __name__ == "__main__":
from trepan.processor.command.mock import MockDebugger
from trepan.processor.cmdproc import CommandProcessor
import sys
d = MockDebugger()
cmdproc = CommandProcessor(d.core)
# print '-' * 10
# print_source_line(sys.stdout.write, 100, 'source_line_test.py')
# print '-' * 10
cmdproc.frame = sys._getframe()
cmdproc.setup()
for cmd in (
"break '''c:\\tmp\\foo.bat''':1",
'break """/Users/My Documents/foo.py""":2',
"break",
"break 10",
"break if True",
"break cmdproc.py:5",
"break set_break()",
"break 4 if i==5",
# "break cmdproc.setup()",
):
args = cmd.split(" ")
cmdproc.current_command = cmd
print(parse_break_cmd(cmdproc, args))
pass
|
atsareg/VMDIRAC
|
VMDIRAC/WorkloadManagementSystem/Agent/VirtualMachineMonitorAgent.py
|
Python
|
gpl-3.0
| 9,987
| 0.039652
|
""" VirtualMachineMonitorAgent plays the role of the watch dog for the Virtual Machine
"""
import os
import time
import glob
# DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, rootPath
from DIRAC.ConfigurationSystem.Client.Helpers import Operations
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import List, Network
# VMDIRAC
from VMDIRAC.WorkloadManagementSystem.Client.ServerUtils import virtualMachineDB
__RCSID__ = "$Id$"
class VirtualMachineMonitorAgent( AgentModule ):
def __getCSConfig( self ):
if not self.runningPod:
return S_ERROR( "/LocalSite/RunningPod is not defined" )
#Variables coming from the vm
imgPath = "/Cloud/%s" % self.runningPod
for csOption, csDefault, varName in ( ( "MinWorkingLoad", 0.01, "vmMinWorkingLoad" ),
( "LoadAverageTimespan", 60, "vmLoadAvgTimespan" ),
( "HaltPeriod", 600, "haltPeriod" ),
( "HaltBeforeMargin", 300, "haltBeforeMargin" ),
( "HeartBeatPeriod", 300, "heartBeatPeriod" ),
):
path = "%s/%s" % ( imgPath, csOption )
value = self.op.getValue( path, csDefault )
if not value > 0:
return S_ERROR( "%s has an incorrect value, must be > 0" % path )
setattr( self, varName, value )
for csOption, csDefault, varName in (
( "JobWrappersLocation", "/scratch", "vmJobWrappersLocation" ),
):
path = "%s/%s" % ( imgPath, csOption )
value = gConfig.getValue( path, csDefault )
if not value :
return S_ERROR( "%s points to an empty string, cannot be!" % path )
setattr( self, varName, value )
self.haltBeforeMargin = max( self.haltBeforeMargin, int( self.am_getPollingTime() ) + 5 )
self.haltPeriod = max( self.haltPeriod, int( self.am_getPollingTime() ) + 5 )
self.heartBeatPeriod = max( self.heartBeatPeriod, int( self.am_getPollingTime() ) + 5 )
self.log.info( "** VM Info **" )
self.log.info( "Name : %s" % self.runningPod )
self.log.info( "Min Working Load : %f" % self.vmMinWorkingLoad )
self.log.info( "Load Avg Timespan : %d" % self.vmLoadAvgTimespan )
self.log.info( "Job wrappers location : %s" % self.vmJobWrappersLocation )
self.log.info( "Halt Period : %d" % self.haltPeriod )
self.log.info( "Halt Before Margin : %d" % self.halt
|
BeforeMargin )
self.log.info( "HeartBeat Period : %d" % self.heartBeatPeriod )
if self.vmID:
self.log.info( "DIRAC ID : %s
|
" % self.vmID )
if self.uniqueID:
self.log.info( "Unique ID : %s" % self.uniqueID )
self.log.info( "*************" )
return S_OK()
def __declareInstanceRunning( self ):
#Connect to VM monitor and register as running
retries = 3
sleepTime = 30
for i in range( retries ):
result = virtualMachineDB.declareInstanceRunning( self.uniqueID, self.ipAddress )
if result[ 'OK' ]:
self.log.info( "Declared instance running" )
return result
self.log.error( "Could not declare instance running", result[ 'Message' ] )
if i < retries - 1 :
self.log.info( "Sleeping for %d seconds and retrying" % sleepTime )
time.sleep( sleepTime )
return S_ERROR( "Could not declare instance running after %d retries" % retries )
def initialize( self ):
self.am_disableMonitoring()
#Init vars
self.runningPod = gConfig.getValue( '/LocalSite/RunningPod' )
self.log.info( "Running pod name of the image is %s" % self.runningPod )
self.vmID = gConfig.getValue( '/LocalSite/VMID' )
self.__loadHistory = []
self.vmMinWorkingLoad = None
self.vmLoadAvgTimespan = None
self.vmJobWrappersLocation = None
self.haltPeriod = None
self.haltBeforeMargin = None
self.heartBeatPeriod = None
self.am_setOption( "MaxCycles", 0 )
self.am_setOption( "PollingTime", 60 )
#Discover net address
netData = Network.discoverInterfaces()
for iface in sorted( netData ):
if iface.find( "eth" ) == 0:
self.ipAddress = netData[ iface ][ 'ip' ]
break
self.log.info( "IP Address is %s" % self.ipAddress )
#getting the stop policy
self.op = Operations.Operations()
    self.vmStopPolicy = self.op.getValue( "Cloud/%s/VMStopPolicy" % self.runningPod, 'elastic' )
self.log.info( "vmStopPolicy is %s" % self.vmStopPolicy )
#Declare instance running
self.uniqueID = ''
result = virtualMachineDB.getUniqueIDByName( self.vmID )
if result['OK']:
self.uniqueID = result['Value']
result = self.__declareInstanceRunning()
if not result[ 'OK' ]:
self.log.error( "Could not declare instance running", result[ 'Message' ] )
self.__haltInstance()
return S_ERROR( "Halting!" )
self.__instanceInfo = result[ 'Value' ]
#Get the cs config
result = self.__getCSConfig()
if not result[ 'OK' ]:
return result
return S_OK()
def __getLoadAvg( self ):
result = self.__getCSConfig()
if not result[ 'OK' ]:
return result
with open( "/proc/loadavg", "r" ) as fd:
data = [ float( v ) for v in List.fromChar( fd.read(), " " )[:3] ]
self.__loadHistory.append( data )
numRequiredSamples = max( self.vmLoadAvgTimespan / self.am_getPollingTime(), 1 )
while len( self.__loadHistory ) > numRequiredSamples:
self.__loadHistory.pop( 0 )
self.log.info( "Load averaged over %d seconds" % self.vmLoadAvgTimespan )
self.log.info( " %d/%s required samples to average load" % ( len( self.__loadHistory ),
numRequiredSamples ) )
avgLoad = 0
for f in self.__loadHistory:
avgLoad += f[0]
return avgLoad / len( self.__loadHistory ), len( self.__loadHistory ) == numRequiredSamples
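  # Worked example for __getLoadAvg, assuming the defaults from __getCSConfig:
  # LoadAverageTimespan = 60 and PollingTime = 60 give
  # numRequiredSamples = max( 60 / 60, 1 ) = 1, so only the latest
  # 1-minute load average is kept and returned.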
def __getNumJobWrappers( self ):
if not os.path.isdir( self.vmJobWrappersLocation ):
return 0
self.log.info( "VM job wrappers path: %s" % self.vmJobWrappersLocation )
jdlList = glob.glob( os.path.join( self.vmJobWrappersLocation, "*", "*.jdl" ) )
return len( jdlList )
def execute( self ):
#Get load
avgLoad, avgRequiredSamples = self.__getLoadAvg()
self.log.info( "Load Average is %.2f" % avgLoad )
if not avgRequiredSamples:
self.log.info( " Not all required samples yet there" )
#Do we need to send heartbeat?
with open( "/proc/uptime" ) as fd:
uptime = float( List.fromChar( fd.read().strip(), " " )[0] )
hours = int( uptime / 3600 )
minutes = int( uptime - hours * 3600 ) / 60
seconds = uptime - hours * 3600 - minutes * 60
self.log.info( "Uptime is %.2f (%d:%02d:%02d)" % ( uptime, hours, minutes, seconds ) )
#Num jobs
numJobs = self.__getNumJobWrappers()
self.log.info( "There are %d job wrappers" % numJobs )
if uptime % self.heartBeatPeriod <= self.am_getPollingTime():
#Heartbeat time!
self.log.info( "Sending hearbeat..." )
result = virtualMachineDB.instanceIDHeartBeat( self.uniqueID, avgLoad, numJobs, 0, 0, )
status = None
if result[ 'OK' ]:
self.log.info( " heartbeat sent!" )
status = result['Value']
else:
if "Transition" in result["Message"]:
self.log.error( "Error on service:", result[ 'Message' ] )
status = result['State']
else:
self.log.error("Connection error", result["Message"])
if status:
self.__processHeartBeatMessage( status, avgLoad )
#Do we need to check if halt?
if avgRequiredSamples and uptime % self.haltPeriod + self.haltBeforeMargin > self.haltPeriod:
self.log.info( "Load average is %s (minimum for working instance is %s)" % ( avgLoad,
self.vmMinWorkingLoad ) )
#current stop polices: elastic (load) and never
if self.vmStop
|
MaxIV-KitsControls/netspot
|
netspot/lib/spotmax/nsinv.py
|
Python
|
mit
| 4,110
| 0.012895
|
#!/usr/bin/python -tt
"""Module to convert data in MongoDB to Ansible inventory JSON."""
# pylint: disable=C0103
import json
import os
import argparse
from netspot import NetSPOT
from spotmax import SPOTGroup
SPECIAL_FIELDS = ['_id', 'lastModified']
class Host(object):
"""Class to hold a host."""
def __init__(self, device):
self.hostname = device['asset']
self.attributes = dict()
    # Add the groups this device belongs to
self.groups = device['groups']
for tag in device:
# Break out user variables
if tag == 'variables':
for variable in device[tag]:
for key in variable:
self.add_attribute(key, variable[key])
# Filter out MongoDB special tags
elif tag not in SPECIAL_FIELDS:
self.add_attribute(tag, device[tag])
def add_attribute(self, attribute, value):
"""Add attribute to host."""
self.attributes[attribute] = value
class Hostvars(object):
"""Class to hold host variables."""
def __init__(self):
self.hosts = dict()
def add_host(self, host):
"""Add host and its attributes to the list of hosts."""
self.hosts[host.hostname] = host.attributes
def get_hostvars(self):
"""Returns Ansible formatted hostvars."""
return {'hostvars': self.hosts}
class Group(object):
"""Class that hold group information."""
def __init__(self, group):
self.group = group
self.members = list()
self.vars = dict()
# Get inventory
inventory = SPOTGroup()
# Add group variables
group_vars = inventory.get_variables(group)
for var in group_vars:
if var.keys()[0] not in SPECIAL_FIELDS:
self.add_vars(var.keys()[0], var.values()[0])
def add_group_member(self, member):
"""Add group to list of groups."""
self.members.append(member)
def add_vars(self, var, value):
"""Add group vairables."""
self.vars[var] = value
def get_group(self):
"""Return Ansible formatted host and vars data."""
return {'hosts': self.members,
'vars': self.vars}
def AnsibleInventory(attribute=None, json_output=True, inventory=None):
"""Class to generate and return Ansible JSON inventory data.
Args:
attribute: string, what to search for
json_output: boolean, True: return JSON
False: return raw data
inventory: searchable inventory
Return:
search result in either JSON or raw format
"""
  # Return {} if inventory is missing
if not inventory:
return {}
if attribute:
cursor = inventory.search(attribute)
groups = dict()
data = dict()
hostvars = Hostvars()
# Add devices
for asset in cursor:
# Create Host object and add it to hostvars
host = Host(asset)
hostvars.add_host(host)
# Add group/role and group/role member
for group in asset['groups']:
if group not in groups:
groups[group] = Group(group)
# Add device as member to the group
groups[group].add_group_member(asset['asset'])
# Update group/role members in return data
data.update({group: groups[group].get_group()})
# Return data
data.update({'_meta': hostvars.get_hostvars()})
if json_output:
    return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
else:
return data
def main():
"""Print Ansible dynamic inventory."""
# Arguments
parser = argparse.ArgumentParser(description='MAX IV Network SPOT - netspot')
parser.add_argument('-l', '--list', help='List all', action='store_true', required=True)
parser.add_argument('-f', '--filter',
help='Filter: eg. group:blue',
action='store',
required=False,
default=None)
args = parser.parse_args()
if args.list:
if args.filter:
print AnsibleInventory(attribute=args.filter, inventory=NetSPOT())
elif os.environ.get('FILTER'):
print AnsibleInventory(attribute=os.environ['FILTER'], inventory=NetSPOT())
else:
print "Need filter criteria. Specify with -s or env variable FILTER."
if __name__ == '__main__':
main()
|
linkdebian/pynet_course
|
class4/exercise5.py
|
Python
|
apache-2.0
| 563
| 0.005329
|
# Use Netmiko to enter into configuration mode on pynet-rtr2.
# Also use Netmiko to verify your state (i.e. that you are currently in configuration mode).
from getpass import getpass
import time
from netmiko import ConnectHandler
password = getpass()
pynet_rtr2 = {'device_type': 'cisco_ios', 'ip': '50.76.53.27', 'username': 'pyclass', 'password': password, 'port': 8022}
ssh_connection = ConnectHandler(**pynet_rtr2)
time.sleep(2)
ssh_connection.config_mode()
output = ssh_connection.find_prompt()
print "The current state of the prompt is %s" % out
|
put
|
anthrotype/ctypes-binding-generator
|
test/suite_clang_cindex.py
|
Python
|
gpl-3.0
| 238
| 0.004202
|
import sys
import unittest
import cbind
cbind.choose_cindex_impl(cbind.CLANG_CINDEX)
import suite_all
if __name__ == '__main__':
runner = unittest.TextTestRunner()
sys.exit(not runner.run(suite_all.suite_all).wasSuccessful())
|
amitsaha/learning
|
python/strings/difflib_closest_match.py
|
Python
|
unlicense
| 697
| 0.011478
|
import difflib
import sys
class Repository:
def __init__(self, fname=None):
if not fname:
fname = '/usr/share/dict/words'
        with open(fname) as f:
self.repository = [x.rstrip('\n') for x in f.readlines()]
def find_close_matches(r, w, count=3):
return difflib.get_close_matches(w, r.repository, count)
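# Illustrative behaviour (actual matches depend on the local words file):
#   find_close_matches(r, 'pyhton') might return ['python', 'phyton', 'photon']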
if __name__ == '__main__':
r = Repository()
    w = raw_input('Your word please: ')
if len(w.split()) != 1:
sys.exit('please enter a word only')
try:
count = int(raw_input('Number of matches: '))
except ValueError:
sys.exit('Enter a number please')
print find_close_matches(r, w, count)
|
witchard/grole
|
examples/example-async.py
|
Python
|
mit
| 188
| 0.021277
|
#!/usr/bin/env python3
import asyncio
from grole import Grole
app = Grole()
@app.route('/(\d+)')
async def index(env, req):
    await asyncio.sleep( int(req.match.group(1)) )
app.run()
|
Gaojiaqi/spark-ec2
|
test_speed_master.py
|
Python
|
apache-2.0
| 789
| 0.00507
|
import sys
import os
import subprocess
import time
def get_slaves():
slaves = subprocess.check_output(['cat', '/root/spark-ec2/slaves'])
return slaves.strip().split('\n')
slaves = get_slaves()
for slave in slaves:
subprocess.call(['ssh', slave, 'killall', 'iperf3'])
subprocess.call(['scp', '/root/spark-ec2/test_speed_slave.py', slave+':/root/'])
iperf_master = subprocess.Popen(['iperf3', '-s', '-p', '6789'])
iperf_slaves = []
for slave in slaves:
iperf_slaves.append(subprocess.check_output(['ssh', slave, 'python', 'test_speed_slave.py']))
time.sleep(1)
print "terminating master"
iperf_master.terminate()
subprocess.call(['killall', 'iperf3'])
time.sleep(1)
print "checking slaves speed"
for iperf_slave in iperf_slaves:
print iperf_slave.strip()
|
webadmin87/midnight
|
midnight_news/models.py
|
Python
|
bsd-3-clause
| 2,757
| 0.003709
|
from django.core.urlresolvers import reverse
from django.db import models
from midnight_main.models import BaseTree, Base, BreadCrumbsMixin, BaseComment
from ckeditor.fields import RichTextField
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail import ImageField
from mptt.fields import TreeManyToManyField
class Section(BreadCrumbsMixin, BaseTree):
"""
    News category model
"""
title = models.CharField(max_length=255, verbose_name=_('Title'))
slug = models.SlugField(max_length=255, unique=True, verbose_name=_('Slug'))
sort = models.IntegerField(default=500, verbose_name=_('Sort'))
metatitle = models.CharField(max_length=2000, blank=True, verbose_name=_('Title'))
keywords = models.CharField(max_length=2000, blank=True, verbose_name=_('Keywords'))
description = models.CharField(max_length=2000, blank=True, verbose_name=_('Description'))
def get_absolute_url(self):
return reverse('midnight_news:news_list', kwargs={'slug': self.slug})
def __str__(self):
return self.title
class MPTTMeta:
order_insertion_by = ['sort']
class Meta:
verbose_name = _('NewsSection')
verbose_name_plural = _('NewsSections')
class News(Base):
"""
    News model
"""
title = models.CharField(max_length=255, verbose_name=_('Title'))
slug = models.SlugField(max_length=255, unique=True, verbose_name=_('Slug'))
date = models.DateField(verbose_name=_('Date'), blank=False)
sections = TreeManyToManyField(Section, verbose_name=_('Sections'))
image = ImageField(upload_to='news', verbose_name=_('Image'), blank=True)
annotation = models.TextField(blank=True, verbose_name=_('Annotation'))
text = RichTextField(blank=True, verbose_name=_('Text'))
comments = models.BooleanField(default=False, verbose_name=_('Comments'))
    metatitle = models.CharField(max_length=2000, blank=True, verbose_name=_('Title'))
keywords = models.CharField(max_length=2000, blank=True, verbose_name=_('Keywords'))
description = models.CharField(max_length=2000, blank=True, verbose_name=_('Description'))
def get_absolute_url(self):
        return reverse('midnight_news:news_detail', kwargs={'section_slug': self.sections.all()[0].slug, 'slug': self.slug})
def __str__(self):
return self.title
class Meta:
verbose_name = _('NewsItem')
verbose_name_plural = _('News')
class NewsComment(BaseComment):
"""
    News comment model
"""
obj = models.ForeignKey(News)
class Meta:
verbose_name = _('NewsComment')
verbose_name_plural = _('NewsComments')
|
tuttle/django-new-project-template
|
src/myproject/myproject/conf/base_settings.py
|
Python
|
mit
| 5,761
| 0.001389
|
#@PydevCodeAnalysisIgnore
########################################################################
# This module is a direct copy of the current
# django.conf.project_template.settings
# Compare when Django is upgraded. Make your project settings
# changes in settings modules that are importing this one.
########################################################################
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '{{ secret_key }}'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
smathot/quiedit
|
libquiedit/theme.py
|
Python
|
gpl-2.0
| 3,523
| 0.026114
|
# -*- coding: utf-8 -*-
"""
This file is part of quiedit.
quiedit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
quiedit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with quiedit. If not, see <http://www.gnu.org/licenses/>.
"""
from PyQt4 import QtGui, QtCore
import yaml
class theme(object):
"""Handles theming."""
def __init__(self, editor):
"""
Constructor.
Arguments:
editor -- A qtquiedit object.
"""
self.editor = editor
self.themeDict = yaml.load(open(self.editor.get_resource( \
u'themes.yaml')).read())
self.theme = self.recTheme(self.editor.theme)
def apply(self):
"""Applies the theme."""
stylesheet = u"""
background: %(editor_background)s;
color: %(font_color)s;
selection-color: %(editor_background)s;
selection-background-color: %(font_color)s;
font-family: %(font_family)s;
font-size: %(font_size)spt;
""" % self.theme
self.editor.main_widget.setStyleSheet(u"background: %s;" \
% self.theme[u"main_background"])
self.editor.editor_frame.setStyleSheet(u"color: %s;" \
% self.theme[u"border_color"])
self.editor.search_box.setStyleSheet(stylesheet)
self.editor.search_edit.setStyleSheet(u"border: 0;")
self.editor.search_edit.setFont(self.font())
self.editor.search_label.setFont(self.font())
self.editor.command_box.setStyleSheet(stylesheet)
self.editor.command_edit.setStyleSheet(u"border: 0;")
self.editor.command_edit.setFont(self.font())
self.editor.command_label.setFont(self.font())
self.editor.status.setFont(self.font())
self.editor.status.setStyleSheet(u"color: %s;" % self.theme[ \
u"status_color"])
self.editor.central_widget.setMinimumWidth(int(self.theme[ \
u"editor_width"]))
self.editor.central_widget.setMaximumWidth(int(self.theme[ \
u"editor_width"]))
# Apply the theme to all quieditors
for quieditor in self.editor.editor, self.editor.help, \
self.editor._markdown:
quieditor.setStyleSheet(stylesheet)
if not self.theme[u"scrollbar"]:
quieditor.setVerticalScrollBarPolicy( \
QtCore.Qt.ScrollBarAlwaysOff)
quieditor.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
# Apply the theme to the preferences screen
self.editor.prefs.setStyleSheet(stylesheet)
# Redo spellingcheck in the editor
self.editor.editor.check_entire_document()
# Hide the cursor for the main screen
self.editor.setCursor(QtCore.Qt.BlankCursor)
def font(self):
"""
Gives the theme font.
Returns:
A QFont.
"""
font = QtGui.QFont()
font.setPointSize(int(self.theme[u'font_size']))
font.setFamily(self.theme[u'font_family'])
return font
def recTheme(self, theme):
"""
Gets the current theme, respecting inheritance.
Arguments:
theme -- The theme name.
Returns:
A dictionary with with the theme information.
"""
if theme not in self.themeDict:
print(u'theme.__init__(): %s is not a valid theme' % theme)
theme = u'default'
d = self.themeDict[theme]
if u'inherits' in d:
_d = self.recTheme(d[u'inherits'])
for key, val in _d.items():
if key not in d:
d[key] = val
return d
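# Illustrative themes.yaml fragment (made-up values) showing the 'inherits'
# mechanism that recTheme() resolves:
#
#   dark:
#     inherits: default
#     editor_background: '#1d1f21'
#     font_color: '#c5c8c6'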
|
CACTUS-Mission/TRAPSat
|
rpi/tim/parallel_read_arduino.py
|
Python
|
mit
| 929
| 0.020452
|
# For Arduino Data:
import serial, signal, sys
################################################################################################
ARDUINO_SERIAL_PORT = '/dev/ttyACM0'
# Serial port should be set to the port found in the Arduino Program when programming the board
parallel_data_file = "parallel_data_file.raw"
# File to write data to
#################################################################################################
file = open(parallel_data_file, 'w')
ard_ser = serial.Serial(ARDUINO_SERIAL_PORT, 19200)
# SIGINT Handler -- ensures port and file are closed properly
def signal_handler(signal, frame):
print("Exiting Arduino S
|
erial Read!")
ard_ser.close()
file.close()
sys.exit(0)
# Register signal handler
signal.signal(signal.SIGINT, signal_handler)
# Read Data From Arduino Serial Port
print "Arduino Serial Read Running"
while 1:
data = ard_ser.read(1)
file.write( str(data) )
|
TUB-Control/PaPI
|
papi/event/data/DataBase.py
|
Python
|
gpl-3.0
| 1,072
| 0.008396
|
#!/usr/bin/python3
#-*- coding: latin-1 -*-
"""
Copyright (C) 2014 Technische Universität Berlin,
Fakultät IV - Elektrotechnik und Informatik,
Fachgebiet Regelungssysteme,
Einsteinufer 17, D-10587 Berlin, Germany
This file is part of PaPI.
PaPI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PaPI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PaPI. If not, see <http://www.gnu.org/licenses/>.
Contributors:
Stefan Ruppin
"""
from papi.event.event_base import PapiEventBase
class DataBase(PapiEventBase):
def __init__(self, oID, destID, operation, opt):
super().__init__(oID, destID, 'data_event', operation, opt)
|
dafrito/trac-mirror
|
trac/db/tests/util.py
|
Python
|
bsd-3-clause
| 1,572
| 0.001272
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.db.util import sql_escape_percent
# TODO: test IterableCursor, ConnectionWrapper
class SQLEscapeTestCase(unittest.TestCase):
def test_sql_escape_percent(self):
self.assertEqual("%", sql_escape_percent("%"))
self.assertEqual("'%%'", sql_escape_percent("'%'"))
self.assertEqual("''%''", sql_escape_percent("''%''"))
self.assertEqual("'''%
|
%'''", sql_escape_percent("'''%'''"))
self.assertEqual("'''%%'", sql_escape_percent("'''%'"))
self.assertEqual("%s", sql_escape_percent("%s"))
self.assertEqual("% %", sql_escape_percent("% %"))
self.assertEqual("%s %i", sql_escape_percent("%s %i"))
        self.assertEqual("'%%s'", sql_escape_percent("'%s'"))
self.assertEqual("'%% %%'", sql_escape_percent("'% %'"))
self.assertEqual("'%%s %%i'", sql_escape_percent("'%s %i'"))
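        # Reading of the behaviour the assertions above encode: percent signs
        # inside single-quoted SQL literals are doubled so they survive later
        # %-style parameter substitution, while percents outside literals
        # (including the %s placeholders themselves) are left untouched.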
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SQLEscapeTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
wolverineav/horizon-bsn
|
horizon_bsn/future_enabled/_8005_bsnextensions.py
|
Python
|
apache-2.0
| 356
| 0
|
from django.utils.translation import ugettext_lazy as _
# The slug of the panel group to be added to HORIZON_CONFIG. Required.
PANEL_GROUP = 'bsnextensions'
# The display name of the PANEL_GROUP. Required.
PANEL_GROUP_NAME = _('BSN Extensions')
# The slug of the dashboard the PANEL_GROUP is associated with. Required.
PANEL_GROUP_DASHBOARD = 'bsndashboard'
|
mlperf/training_results_v0.7
|
Google/benchmarks/minigo/implementations/minigo-research-TF-tpu-v4-128/preprocessing.py
|
Python
|
apache-2.0
| 10,808
| 0.007494
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=g-doc-args,missing-docstring,g-doc-return-or-yield,g-space-before-docstring-summary,unused-argument,g-short-docstring-punctuation, g-no-space-after-docstring-summary
"""Utilities to create, read, write tf.Examples."""
import functools
import random
import numpy as np
import tensorflow.compat.v1 as tf
from REDACTED.minigo import bigtable_input
from REDACTED.minigo import coords
from REDACTED.minigo import dual_net
from REDACTED.minigo import features as features_lib
from REDACTED.minigo import go
from REDACTED.minigo import sgf_wrapper
from REDACTED.minigo import symmetries
TF_RECORD_CONFIG = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.ZLIB)
def _one_hot(index):
onehot = np.zeros([go.N * go.N + 1], dtype=np.float32)
onehot[index] = 1
return onehot
def make_tf_example(features, pi, value):
"""
Args:
features: [N, N, FEATURE_DIM] nparray of uint8
pi: [N * N + 1] nparray of float32
value: float
"""
return tf.train.Example(
features=tf.train.Features(
feature={
'x':
tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[features.tostring()])),
'pi':
tf.train.Feature(
bytes_list=tf.train.BytesList(value=[pi.tostring()])),
'outcome':
tf.train.Feature(
float_list=tf.train.FloatList(value=[value]))
}))
def write_tf_examples(filename, tf_examples, serialize=True):
"""
Args:
filename: Where to write tf.records
tf_examples: An iterable of tf.Example
serialize: whether to serialize the examples.
"""
with tf.python_io.TFRecordWriter(
filename, options=TF_RECORD_CONFIG) as writer:
for ex in tf_examples:
if serialize:
writer.write(ex.SerializeToString())
else:
writer.write(ex)
def batch_parse_tf_example(batch_size, layout, example_batch):
"""
Args:
batch_size: batch size
layout: 'nchw' or 'nhwc'
example_batch: a batch of tf.Example
Returns:
A tuple (feature_tensor, dict of output tensors)
"""
planes = dual_net.get_features_planes()
features = {
'x': tf.FixedLenFeature([], tf.string),
'pi': tf.FixedLenFeature([], tf.string),
'outcome': tf.FixedLenFeature([], tf.float32),
}
parsed = tf.parse_example(example_batch, features)
x = tf.decode_raw(parsed['x'], tf.uint8)
x = tf.cast(x, tf.float32)
if layout == 'nhwc':
shape = [batch_size, go.N, go.N, planes]
else:
shape = [batch_size, planes, go.N, go.N]
x = tf.reshape(x, shape)
pi = tf.decode_raw(parsed['pi'], tf.float32)
pi = tf.reshape(pi, [batch_size, go.N * go.N + 1])
outcome = parsed['outcome']
outcome.set_shape([batch_size])
return x, {'pi_tensor': pi, 'value_tensor': outcome}
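# Shape summary for the parser above (a descriptive note, shapes read off the
# reshape/set_shape calls): with layout='nhwc' the feature tensor is
# [batch_size, go.N, go.N, planes]; 'pi_tensor' is [batch_size, go.N * go.N + 1]
# and 'value_tensor' is [batch_size].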
def read_tf_records(batch_size, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None, interleave=True,
filter_amount=1.0):
"""
Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
      num_repeats: how many times the data should be read (default: 1)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
      interleave: whether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
a tf dataset of batched tensors
"""
if shuffle_examples and not shuffle_buffer_size:
raise ValueError('Must set shuffle buffer size if shuffling examples')
tf_records = list(tf_records)
if shuffle_records:
random.shuffle(tf_records)
record_list = tf.data.Dataset.from_tensor_slices(tf_records)
# compression_type here must agree with write_tf_examples
map_func = functools.partial(
tf.data.TFRecordDataset,
buffer_size=8 * 1024 * 1024,
compression_type='ZLIB')
if interleave:
# cycle_length = how many tfrecord files are read in parallel
# The idea is to shuffle both the order of the files being read,
# and the examples being read from the files.
dataset = record_list.apply(
tf.data.experimental.parallel_interleave(
map_func, cycle_length=64, sloppy=True))
else:
dataset = record_list.flat_map(map_func)
if filter_amount < 1.0:
dataset = dataset.filter(lambda _: tf.random_uniform([]) < filter_amount)
dataset = dataset.repeat(num_repeats)
if shuffle_examples:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.batch(batch_size)
return dataset
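# A minimal usage sketch (hypothetical file names): building a shuffled,
# batched pipeline from two ZLIB-compressed record files.
#
#   dataset = read_tf_records(
#       batch_size=256,
#       tf_records=['chunk-0.tfrecord.zz', 'chunk-1.tfrecord.zz'],
#       shuffle_examples=True,
#       shuffle_buffer_size=2000)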
def _random_rotation(feature_layout, x_tensor, outcome_tensor):
pi_tensor = outcome_tensor['pi_tensor']
if feature_layout == 'nhwc':
x_rot_tensor, pi_rot_tensor = symmetries.rotate_train_nhwc(
x_tensor, pi_tensor)
else:
x_rot_tensor, pi_rot_tensor = symmetries.rotate_train_nchw(
x_tensor, pi_tensor)
outcome_tensor['pi_tensor'] = pi_rot_tensor
return x_rot_tensor, outcome_tensor
def get_input_tensors(batch_size,
feature_layout,
tf_records,
num_repeats=1,
                      shuffle_records=True,
shuffle_examples=True,
shuffle_buffer_size=None,
filter_amount=0.05,
                      random_rotation=True):
    """Read tf.Records and prepare them for ingestion by dual_net.
See `read_tf_records` for parameter documentation.
Returns a dict of tensors (see return value of batch_parse_tf_example)
"""
print('Reading tf_records from {} inputs'.format(len(tf_records)))
dataset = read_tf_records(
batch_size,
tf_records,
num_repeats=num_repeats,
shuffle_records=shuffle_records,
shuffle_examples=shuffle_examples,
shuffle_buffer_size=shuffle_buffer_size,
filter_amount=filter_amount,
interleave=False)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout), batch_size))
return dataset.make_one_shot_iterator().get_next()
def get_tpu_input_tensors(batch_size,
feature_layout,
tf_records,
num_repeats=1,
shuffle_records=True,
shuffle_examples=True,
shuffle_buffer_size=None,
filter_amount=0.05,
random_rotation=True):
    # TPUs train on sequential golden chunks to simplify preprocessing and
# reproducibility.
assert len(tf_records) < 101, 'Use example_buffer to build a golden_chunk'
dataset = read_tf_records(
batch_size,
tf_records,
num_repeats=num_repeats,
shuffle_records=shuffle_records,
shuffle_examples=shuffle_examples,
shuffle_buffer_size=shuffle_buffer_size,
filter_amount=filter_amount,
in
|
duncan-r/SHIP
|
ship/fmp/ief.py
|
Python
|
mit
| 11,467
| 0.002703
|
"""
Summary:
Ief file data holder.
Contains the functionality for loading ISIS .ief files from disk.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
import os
from ship.utils import utilfunctions as uf
from ship.utils import filetools as ft
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class IefDataTypes(object):
"""Enum for the different data types within the Ief class.
Use these for easy access of the Ief class.
"""
HEADER, DETAILS, IED_DATA, SNAPSHOTS, DESCRIPTION = range(5)
class Ief(object):
"""Contains the details in the in the IEF file.
    Class data and methods for accessing and updating the .ief file.
"""
def __init__(self, path_holder, header, details, snapshots=None,
ied_data=None, description=None):
"""Constructor.
Args:
path_holder (PathHolder): Object containing the file path to this
ief file.
            header: The [Event header] section of the ief file. It contains
data like the title and main filepaths.
details: The [Event Details] section of the ief file. It
contains almost all the other data in the ief file, including
all the flags for the run.
snapshots: List containing a dictionary in each element that
has the snapshot time and filepath.
ied_data: List containing a dictionary in each element that
contains the title and file path for every ied file referenced
in the ief file.
description: List containing the line of the description
section of the file.
"""
self.event_header = header
self.event_details = details
self.snapshots = snapshots
self.ied_data = ied_data
self.description = description
self.path_holder = path_holder
def getFilePaths(self):
"""Returns all the file paths that occur in the ief file.
Most paths are extracted from the head and details data, when they
exist, and are added to paths_dict. If any ied data or snapshot data
exists it will be added as a list to the dictionary.
        If a particular path is not found the value will be set to None,
        unless it's ied or snapshot data, in which case it will be an empty list.
        Dict keys are: Datafile, Results, InitialConditions, 2DFile, ieds,
        and snapshots.
Returns:
dict - containing all of the path data stored by this object.
"""
paths_dict = {}
try:
paths_dict['Datafile'] = self._findVarInDictionary(self.event_header, 'Datafile')
except:
paths_dict['Datafile'] = None
try:
paths_dict['Results'] = self._findVarInDictionary(self.event_header, 'Results')
except:
paths_dict['Results'] = None
try:
paths_dict['InitialConditions'] = self._findVarInDictionary(self.event_details, 'InitialConditions')
except:
paths_dict['InitialConditions'] = None
try:
paths_dict['2DFile'] = self._findVarInDictionary(self.event_details, '2DFile')
except:
paths_dict['2DFile'] = None
        if self.ied_data is not None and self.ied_data != []:
ied_paths = [ied['file'] for ied in self.ied_data]
paths_dict['ieds'] = ied_paths
else:
paths_dict['ieds'] = []
        if self.snapshots is not None and self.snapshots != []:
snapshot_paths = [snap['file'] for snap in self.snapshots]
paths_dict['snapshots'] = snapshot_paths
else:
paths_dict['snapshots'] = []
return paths_dict
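    # A usage sketch (hypothetical values):
    #
    #   paths = ief.getFilePaths()
    #   paths['Datafile']    # e.g. 'model.dat' when set in the [Event header]
    #   paths['ieds']        # list of referenced ied file paths, possibly empty
    #   paths['snapshots']   # list of snapshot file paths, possibly empty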
def getValue(self, key):
"""Get a value from one of the variables dictionaries.
All single variables (i.e. not lists like ied data) are stored in two
main dictionaries. This method will return the value associated with
the given key from whichever dictionary it is stored in.
Args:
key(str): dict key for value. For a list of available keys use the
getAvailableKeys method.
Return:
string: value referenced by the given key, in the ief file.
Raises:
            KeyError: if the given key does not exist.
"""
if key in self.event_header.keys():
return self.event_header[key]
elif key in self.event_details.keys():
return self.event_details[key]
def getIedData(self):
"""Get all of the ied data stored in this object.
There can be multiple ied files referenced by an ief. This will return
a dictionary containing all of them.
If no ied files are included in the ief file the returned list will
be empty.
Returns:
            list - containing {'name': ..., 'file': ...} dicts for all ied
            files referenced.
"""
        if self.ied_data is None:
return []
else:
return self.ied_data
def getSnapshots(self):
"""Get all of the snapshot data stored in this object.
There can be multiple snapshot files referenced by an ief. This will return
a dictionary containing all of them.
If no snapshots are included in the ief file the returned list will
be empty.
Returns:
            list - containing {'time': ..., 'file': ...} dicts for all
            snapshot files referenced.
"""
        if self.snapshots is None:
            return []
        else:
            return self.snapshots
def getDescription(self):
"""Returns the description component of the ief."""
return self.description
def setValue(self, key, value):
"""Set the value of one of dictionary entries in the ief.
Args:
key(str): The key of the value to update.
            value(str): the value to update.
Raises:
            KeyError: if given key is not recognised.
Warning:
            Currently no checks are made on the validity of the key given.
            This is because it may be a legal key that does not yet exist in the
dictionary. To fix this a list of all valid keys should be created
and checked here before setting the value. These are the keys used
in the ief file.
"""
headlist = ['Title', 'Path', 'Datafile', 'Results']
if key in headlist:
self.event_header[key] = value
else:
self.event_details[key] = value
def addIedFile(self, ied_path, name=''):
"""Add a new ied file.
Args:
ied_path(str): path to an ied file.
name=''(str): name for the ied file.
"""
if self.ied_data is None:
self.ied_data = []
self.ied_data.append({'name': name, 'file': ied_path})
def addSnapshot(self, snapshot_path, time):
"""Add a new snapshot.
Args:
snapshot_path(str): the path for the snapshot.
time(float): the time to assign to the snapshot.
"""
if self.snapshots is None:
self.snapshots = []
if not uf.isNumeric(time):
raise ValueError('time is not a numeric value')
self.snapshots.append({'time': time, 'file': snapshot_path})
def _findVarInDictionary(self, the_dict, key):
"""Returns the variable in a dictionary.
        Tests to see if a variable exists under the given key in the given
dictionary. If it does it will return it.
Args:
the_dict (Dict): Dictionary in which to check the keys existence.
key (str): Key to look for in the dictionary.
Returns:
The requested variable if it exists or False if not.
"""
try:
variable = the_dict[key]
except KeyError:
logger.debug('No ' + key + ' key found in ief')
|
titilambert/home-assistant
|
homeassistant/components/homekit/type_cameras.py
|
Python
|
apache-2.0
| 15,437
| 0.000518
|
"""Class to hold all camera accessories."""
import asyncio
from datetime import timedelta
import logging
from haffmpeg.core import HAFFmpeg
from pyhap.camera import (
VIDEO_CODEC_PARAM_LEVEL_TYPES,
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES,
Camera as PyhapCamera,
)
from pyhap.const import CATEGORY_CAMERA
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.util import get_local_ip
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_MOTION_DETECTED,
CHAR_MUTE,
CHAR_PROGRAMMABLE_SWITCH_EVENT,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_COUNT,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_STREAM_COUNT,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
SERV_DOORBELL,
SERV_MOTION_SENSOR,
SERV_SPEAKER,
SERV_STATELESS_PROGRAMMABLE_SWITCH,
)
from .img_util import scale_jpeg_camera_image
from .util import pid_is_alive
_LOGGER = logging.getLogger(__name__)
DOORBELL_SINGLE_PRESS = 0
DOORBELL_DOUBLE_PRESS = 1
DOORBELL_LONG_PRESS = 2
VIDEO_OUTPUT = (
"-map {v_map} -an "
"-c:v {v_codec} "
"{v_profile}"
"-tune zerolatency -pix_fmt yuv420p "
"-r {fps} "
"-b:v {v_max_bitrate}k -bufsize {v_bufsize}k -maxrate {v_max_bitrate}k "
"-payload_type 99 "
"-ssrc {v_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} "
"srtp://{address}:{v_port}?rtcpport={v_port}&"
"localrtcpport={v_port}&pkt_size={v_pkt_size}"
)
AUDIO_OUTPUT = (
"-map {a_map} -vn "
"-c:a {a_encoder} "
"{a_application}"
"-ac 1 -ar {a_sample_rate}k "
"-b:a {a_max_bitrate}k -bufsize {a_bufsize}k "
"-payload_type 110 "
"-ssrc {a_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {a_srtp_key} "
"srtp://{address}:{a_port}?rtcpport={a_port}&"
"localrtcpport={a_port}&pkt_size={a_pkt_size}"
)
SLOW_RESOLUTIONS = [
(320, 180, 15),
(320, 240, 15),
]
RESOLUTIONS = [
(320, 180),
(320, 240),
(480, 270),
(480, 360),
(640, 360),
(640, 480),
(1024, 576),
(1024, 768),
(1280, 720),
(1280, 960),
(1920, 1080),
]
VIDEO_PROFILE_NAMES = ["baseline", "main", "high"]
FFMPEG_WATCH_INTERVAL = timedelta(seconds=5)
FFMPEG_WATCHER = "ffmpeg_watcher"
FFMPEG_PID = "ffmpeg_pid"
SESSION_ID = "session_id"
CONFIG_DEFAULTS = {
CONF_SUPPORT_AUDIO: DEFAULT_SUPPORT_AUDIO,
CONF_MAX_WIDTH: DEFAULT_MAX_WIDTH,
CONF_MAX_HEIGHT: DEFAULT_MAX_HEIGHT,
CONF_MAX_FPS: DEFAULT_MAX_FPS,
CONF_AUDIO_CODEC: DEFAULT_AUDIO_CODEC,
CONF_AUDIO_MAP: DEFAULT_AUDIO_MAP,
CONF_VIDEO_MAP: DEFAULT_VIDEO_MAP,
CONF_VIDEO_CODEC: DEFAULT_VIDEO_CODEC,
CONF_AUDIO_PACKET_SIZE: DEFAULT_AUDIO_PACKET_SIZE,
CONF_VIDEO_PACKET_SIZE: DEFAULT_VIDEO_PACKET_SIZE,
CONF_STREAM_COUNT: DEFAULT_STREAM_COUNT,
}
@TYPES.register("Camera")
class Camera(HomeAccessory, PyhapCamera):
"""Generate a Camera accessory."""
def __init__(self, hass, driver, name, entity_id, aid, config):
"""Initialize a Camera accessory object."""
self._ffmpeg = hass.data[DATA_FFMPEG]
for config_key in CONFIG_DEFAULTS:
if config_key not in config:
config[config_key] = CONFIG_DEFAULTS[config_key]
max_fps = config[CONF_MAX_FPS]
max_width = config[CONF_MAX_WIDTH]
max_height = config[CONF_MAX_HEIGHT]
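        # Advertise the fixed low-fps entries that fit under the configured
        # maximums, plus every standard resolution that fits, at max_fps.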
resolutions = [
(w, h, fps)
for w, h, fps in SLOW_RESOLUTIONS
if w <= max_width and h <= max_height and fps < max_fps
] + [
(w, h, max_fps)
for w, h in RESOLUTIONS
if w <= max_width and h <= max_height
]
video_options = {
"codec": {
"profiles": [
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["MAIN"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["HIGH"],
],
"levels": [
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_1"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_2"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE4_0"],
],
},
"resolutions": resolutions,
}
audio_options = {
"codecs": [
{"type": "OPUS", "samplerate": 24},
{"type": "OPUS", "samplerate": 16},
]
}
stream_address = config.get(CONF_STREAM_ADDRESS, get_local_ip())
options = {
"video": video_options,
"audio": audio_options,
"address": stream_address,
"srtp": True,
"stream_count": config[CONF_STREAM_COUNT],
}
super().__init__(
hass,
driver,
name,
entity_id,
aid,
config,
category=CATEGORY_CAMERA,
options=options,
)
self._char_motion_detected = None
self.linked_motion_sensor = self.config.get(CONF_LINKED_MOTION_SENSOR)
if self.linked_motion_sensor:
state = self.hass.states.get(self.linked_motion_sensor)
if state:
serv_motion = self.add_preload_service(SERV_MOTION_SENSOR)
self._char_motion_detected = serv_motion.configure_char(
CHAR_MOTION_DETECTED, value=False
)
self._async_update_motion_state(state)
self._char_doorbell_detected = None
self._char_doorbell_detected_switch = None
self.linked_doorbell_sensor = self.config.get(CONF_LINKED_DOORBELL_SENSOR)
if self.linked_doorbell_sensor:
state = self.hass.states.get(self.linked_doorbell_sensor)
if state:
serv_doorbell = self.add_preload_service(SERV_DOORBELL)
self.set_primary_service(serv_doorbell)
self._char_doorbell_detected = serv_doorbell.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT, value=0,
)
serv_stateless_switch = self.add_preload_service(
SERV_STATELESS_PROGRAMMABLE_SWITCH
)
self._char_doorbell_detected_switch = serv_stateless_switch.configure_char(
CHAR_PROGRAMMABLE_SWITCH_EVENT,
value=0,
valid_values={"SinglePress": DOORBELL_SINGLE_PRESS},
)
serv_speaker = self.add_preload_service(SERV_SPEAKER)
serv_speaker.configure_char(CHAR_MUTE, value=0)
self._async_update_doorbell_state(state)
async def run_handler(self):
"""Handle accessory driver started event.
Run insi
|
de the Home Assistant event loop.
"""
if self._char_motion_detected:
async_track_state_change_event(
self.hass,
[self.linked_motion_sensor],
self._async_update_motion_state_event,
)
if self._char_doorbell_detected:
async_track_state_change_
|
event(
self.hass,
[self.linked_doorbell_sensor],
self._async_update_doorbell_state_event,
)
await super().run_handler()
@callback
def _async_update_motion_state_event(self, event):
"""Handle state change event listener callback."""
self._async_update_motion_state(event.data.get("new_
|
tjgavlick/whiskey-blog
|
app/__init__.py
|
Python
|
mit
| 908
| 0.002203
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from app.markdown import markdown
from app.functions import format_price, format_age, format_age_range, \
    format_proof, format_date, format_datetime, modify_query
app = Flask(__name__)
app.config.from_object('app.config.DevelopmentConfig')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.jinja_env.globals.update(format_price=format_price)
app.jinja_env.globals.update(format_age=format_age)
app.jinja_env.globals.update(format_age_range=format_age_range)
app.jinja_env.globals.update(format_proof=format_proof)
app.jinja_env.globals.update(modify_query=modify_query)
app.jinja_env.globals.update(markdown=markdown)
app.jinja_env.filters['date'] = format_date
app.jinja_env.filters['datetime'] = format_datetime
db = SQLAlchemy(app)
from app import models, views
|
andrewsosa/hackfsu_com
|
api/api/migrations/0018_hackathonsponsor_on_mobile.py
|
Python
|
apache-2.0
| 466
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-09 08:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0017_mentorinfo_availability'),
]
operations = [
migrations.AddField(
model_name='hackathonsponsor',
name='on_mobile',
field=models.BooleanField(default=False),
),
]
|
weolar/miniblink49
|
v8_7_5/tools/clusterfuzz/v8_fuzz_config.py
|
Python
|
apache-2.0
| 1,510
| 0.003311
|
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
# List of configuration experiments for correctness fuzzing.
# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
# Probabilities must add up to 100.
FOOZZIE_EXPERIMENTS = [
[10, 'ignition', 'jitless', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
[30, 'ignition', 'ignition_turbo', 'd8'],
[20, 'ignition', 'ignition_turbo_opt', 'd8'],
[5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
[5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
[5, 'ignition', 'ignition', 'clang_x86/d8'],
[5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
[5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
]
class Config(object):
def __init__(self, name, rng=None):
self.name = name
    self.rng = rng or random.Random()
def choose_foozzie_flags(self):
"""Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
Returns: List of flags to pass to v8_foozzie.py fuzz harness.
"""
acc = 0
threshold = self.rng.random() * 100
for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
acc += prob
if acc > threshold:
return [
'--first-config=' + first_config,
'--second-config=' + second_config,
'--second-d8=' + second_d8,
]
assert False
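# A usage sketch (hypothetical seed): a fixed RNG makes the chosen
# experiment reproducible.
#
#   flags = Config('fuzz', rng=random.Random(42)).choose_foozzie_flags()
#   # e.g. ['--first-config=ignition', '--second-config=ignition_turbo',
#   #       '--second-d8=d8']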
|
RasaHQ/rasa_nlu
|
tests/nlu/emulators/test_no_emulator.py
|
Python
|
apache-2.0
| 948
| 0.001055
|
def test_dummy_request():
from rasa.nlu.emulators.no_emulator import NoEmulator
em = NoEmulator()
norm = em.normalise_request_json({"text": ["arb text"]})
assert norm == {"text": "arb text", "time": None}
norm = em.normalise_request_json({"text": ["arb text"], "time": "1499279161658"})
assert norm == {"text": "arb text", "time": "1499279161658"}
def test_dummy_response():
    from rasa.nlu.emulators.no_emulator import NoEmulator
em = NoEmulator()
data = {"intent": "greet", "text": "hi", "entities": {}, "confidence": 1.0}
assert em.normalise_response_json(data) == data
def test_emulators_can_handle_missing_data():
from rasa.nlu.emulators.luis import LUISEmulator
em = LUISEmulator()
norm = em.normalise_response_json(
{"text": "this data doesn't contain an intent result"}
)
assert norm["prediction"]["topIntent"] is None
assert norm["prediction"]["intents"] == {}
|
jesford/AstroLabels
|
images/sample.py
|
Python
|
mit
| 481
| 0.004158
|
import numpy as np
import matplotlib.pyplot as plt; plt.ion()
import matplotlib
import seaborn; seaborn.set()
from astrolabels import AstroLabels
al = AstroLabels()
matplotlib.rcParams["axes.labelsize"] = 15
matplotlib.rcParams["legend.fontsize"] = 15
# hopefully your data doesn't look like this...
plt.figure(figsize=(4, 3))
plt.plot(np.random.rand(10), np.random.rand(10), 'o')
plt.xlabel(al.r_mpc)
plt.ylabel(al.dsigma_off)
plt.tight_layout()
plt.savefig('sample_plot.png')
|
ryanhorn/tyoiOAuth2
|
setup.py
|
Python
|
mit
| 463
| 0.00216
|
from setuptools import setup
setup(
name="tyoi.OAuth2",
version="0.2.1",
author="Ryan Horn",
author_email="ryan.horn.web@gmail.com",
description=("Implements the client side of the OAuth 2 protocol"),
keywords="oauth oauth2 auth authentication",
url="https://github.com/ryanhorn/tyoiOAuth2",
    packages=["tyoi", "tyoi.oauth2", "tyoi.oauth2.grants", "tyoi.oauth2.authenticators"],
test_suite="tests",
tests_require=["mox"]
)
|
ASCIT/donut-python
|
donut/modules/voting/ranked_pairs.py
|
Python
|
mit
| 2,107
| 0.000949
|
from itertools import chain, combinations, permutations
class RankedPairsResult:
def __init__(self, tallies, winners):
self.tallies = tallies
self.winners = winners
def results(responses):
"""
Returns the list of ranked-pairs winners based on responses.
Takes as input a list of rankings, e.g. [
[['A'], ['B'], ['NO']], # A, then B, then NO, then C
[['A', 'C'], ['B']], # A or C, then B, then NO
[['NO']] # NO, then A or B or C
]
"""
all_candidates = set(vote
for response in responses for rank in response
for vote in rank)
tallies = { # mapping of pairs (A, B) of candidates
pair: 0 # to numbers of responders who ranked A above B
for pair in permutations(all_candidates, 2)
}
for response in responses:
ranked = set(vote for rank in response for vote in rank)
ranks = chain(response, (all_candidates - ranked, ))
for rank_A, rank_B in combinations(ranks, 2):
            for A in rank_A:
for B in rank_B:
tallies[A, B] += 1
def tally_ranking(pair):
"""
The keyfunction which implements the 'ranking' in ranked pairs.
Sorts pairs by highest in favor, or if equal, fewest opposed.
"""
A, B = pair
return (-tallies[A, B], tallies[B, A])
possible_pairs = sorted(tallies, key=tally_ranking)
# Vertices reachable from A in win graph
lower = {A: set((A, )) for A in all_candidates}
# Vertices A is reachable from in win graph
higher = {A: set((A, )) for A in all_candidates}
for A, B in possible_pairs:
if A not in lower[B]: # if we don't already have B > A, set A > B
for s in higher[A]: # if s > ... > A
for t in lower[B]: # and B > ... > t
lower[s].add(t) # then s > ... > t
higher[t].add(s)
winners = sorted(all_candidates, key=lambda A: len(higher[A]))
return RankedPairsResult(tallies, winners)
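# A usage sketch built from the sample ballots in the results() docstring
# (output shape only; the exact ordering follows from the tallies):
#
#   result = results([
#       [['A'], ['B'], ['NO']],
#       [['A', 'C'], ['B']],
#       [['NO']],
#   ])
#   result.winners            # every candidate, best-ranked first
#   result.tallies['A', 'B']  # number of voters who ranked A above B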
|
100grams/flask-security
|
tests/functional_tests.py
|
Python
|
mit
| 9,642
| 0.000104
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import simplejson as json
from cookielib import Cookie
from werkzeug.utils import parse_cookie
from tests import SecurityTest
def get_cookies(rv):
cookies = {}
for value in rv.headers.get_all("Set-Cookie"):
cookies.update(parse_cookie(value))
return cookies
class DefaultSecurityTests(SecurityTest):
def test_instance(self):
self.assertIsNotNone(self.app)
self.assertIsNotNone(self.app.security)
self.assertIsNotNone(self.app.security.pwd_context)
def test_login_view(self):
r = self._get('/login')
self.assertIn('<h1>Login</h1>', r.data)
def test_authenticate(self):
r = self.authenticate()
self.assertIn('Hello matt@lp.com', r.data)
def test_unprovided_username(self):
r = self.authenticate("")
self.assertIn(self.get_message('EMAIL_NOT_PROVIDED'), r.data)
def test_unprovided_password(self):
r = self.authenticate(password="")
self.assertIn(self.get_message('PASSWORD_NOT_PROVIDED'), r.data)
def test_invalid_user(self):
r = self.authenticate(email="bogus@bogus.com")
self.assertIn(self.get_message('USER_DOES_NOT_EXIST'), r.data)
def test_bad_password(self):
r = self.authenticate(password="bogus")
self.assertIn(self.get_message('INVALID_PASSWORD'), r.data)
def test_inactive_user(self):
r = self.authenticate("tiya@lp.com", "password")
self.assertIn(self.get_message('DISABLED_ACCOUNT'), r.data)
def test_logout(self):
self.authenticate()
r = self.logout()
self.assertIsHomePage(r.data)
def test_unauthorized_access(self):
r = self._get('/profile', follow_redirects=True)
self.assertIn('<li class="message">Please log in to access this page.</li>', r.data)
def test_authorized_access(self):
self.authenticate()
r = self._get("/profile")
self.assertIn('profile', r.data)
def test_valid_admin_role(self):
self.authenticate()
r = self._get("/admin")
self.assertIn('Admin Page', r.data)
def test_invalid_admin_role(self):
self.authenticate("joe@lp.com")
r = self._get("/admin", follow_redirects=True)
self.assertIsHomePage(r.data)
def test_roles_accepted(self):
for user in ("matt@lp.com", "joe@lp.com"):
self.authenticate(user)
r = self._get("/admin_or_editor")
self.assertIn('Admin or Editor Page', r.data)
self.logout()
self.authenticate("jill@lp.com")
r = self._get("/admin_or_editor", follow_redirects=True)
self.assertIsHomePage(r.data)
def test_unauthenticated_role_required(self):
r = self._get('/admin', follow_redirects=True)
self.assertIn(self.get_message('UNAUTHORIZED'), r.data)
def test_multiple_role_required(self):
for user in ("matt@lp.com", "joe@lp.com"):
self.authenticate(user)
r = self._get("/admin_and_editor", follow_redirects=True)
self.assertIsHomePage(r.data)
self._get('/logout')
self.authenticate('dave@lp.com')
r = self._get("/admin_and_editor", follow_redirects=True)
self.assertIn('Admin and Editor Page', r.data)
def test_ok_json_auth(self):
r = self.json_authenticate()
data = json.loads(r.data)
self.assertEquals(data['meta']['code'], 200)
self.assertIn('authentication_token', data['response']['user'])
def test_invalid_json_auth(self):
r = self.json_authenticate(password='junk')
self.assertIn('"code": 400', r.data)
def test_token_auth_via_querystring_valid_token(self):
r = self.json_authenticate()
data = json.loads(r.data)
token = data['response']['user']['authentication_token']
r = self._get('/token?auth_token=' + token)
self.assertIn('Token Authentication', r.data)
def test_token_auth_via_header_valid_token(self):
r = self.json_authenticate()
data = json.loads(r.data)
token = data['response']['user']['authentication_token']
headers = {"Authentication-Token": token}
r = self._get('/token', headers=headers)
self.assertIn('Token Authentication', r.data)
def test_token_auth_via_querystring_invalid_token(self):
r = self._get('/token?auth_token=X')
self.assertEqual(401, r.status_code)
def test_token_auth_via_header_invalid_token(self):
r = self._get('/token', headers={"Authentication-Token": 'X'})
self.assertEqual(401, r.status_code)
def test_http_auth(self):
r = self._get('/http', headers={
'Authorization': 'Basic ' + base64.b64encode("joe@lp.com:password")
})
self.assertIn('HTTP Authentication', r.data)
def test_http_auth_no_authorization(self):
r = self._get('/http', headers={})
self.assertIn('<h1>Unauthorized</h1>', r.data)
self.assertIn('WWW-Authenticate', r.headers)
self.assertEquals('Basic realm="Login Required"',
r.headers['WWW-Authenticate'])
def test_invalid_http_auth_invalid_username(self):
r = self._get('/http', headers={
'Authorization': 'Basic ' + base64.b64encode("bogus:bogus")
})
self.assertIn('<h1>Unauthorized</h1>', r.data)
self.assertIn('WWW-Authenticate', r.headers)
self.assertEquals('Basic realm="Login Required"',
r.headers['WWW-Authenticate'])
def test_invalid_http_auth_bad_password(self):
r = self._get('/http', headers={
'Authorization': 'Basic ' + base64.b64encode("joe@lp.com:bogus")
})
self.assertIn('<h1>Unauthorized</h1>', r.data)
self.assertIn('WWW-Authenticate', r.headers)
self.assertEquals('Basic realm="Login Required"',
r.headers['WWW-Authenticate'])
def test_custom_http_auth_realm(self):
r = self._get('/http_custom_realm', headers={
'Authorization': 'Basic ' + base64.b64encode("joe@lp.com:bogus")
})
self.assertIn('<h1>Unauthorized</h1>', r.data)
self.assertIn('WWW-Authenticate', r.headers)
self.assertEquals('Basic realm="My Realm"',
r.headers['WWW-Authenticate'])
def test_multi_auth_basic(self):
r = self._get('/multi_auth', headers={
            'Authorization': 'Basic ' + base64.b64encode("joe@lp.com:password")
})
self.assertIn('Basic', r.data)
def test_multi_auth_token(self):
r = self.json_authenticate()
data = json.loads(r.data)
        token = data['response']['user']['authentication_token']
r = self._get('/multi_auth?auth_token=' + token)
self.assertIn('Token', r.data)
def test_multi_auth_session(self):
self.authenticate()
r = self._get('/multi_auth')
self.assertIn('Session', r.data)
def test_user_deleted_during_session_reverts_to_anonymous_user(self):
self.authenticate()
with self.app.test_request_context('/'):
user = self.app.security.datastore.find_user(email='matt@lp.com')
self.app.security.datastore.delete_user(user)
self.app.security.datastore.commit()
r = self._get('/')
self.assertNotIn('Hello matt@lp.com', r.data)
def test_remember_token(self):
r = self.authenticate(follow_redirects=False)
self.client.cookie_jar.clear_session_cookies()
r = self._get('/profile')
self.assertIn('profile', r.data)
def test_token_loader_does_not_fail_with_invalid_token(self):
c = Cookie(version=0, name='remember_token', value='None', port=None,
port_specified=False, domain='www.example.com',
domain_specified=False, domain_initial_dot=False, path='/',
path_specified=True, secure=False, expires=None,
discard=True, comment=None, comment_url=None,
rest={'HttpOnly': None}, rfc2109
|
SpectraLogic/ds3_python3_sdk
|
samples/getService.py
|
Python
|
apache-2.0
| 784
| 0.002551
|
# Copyright 2014-2017 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from ds3 import ds3
client = ds3.createClientFromEnv()
getServiceResponse = client.get_service(ds3.GetServiceRequest())
for bucket in getServiceResponse.result['BucketList']:
print(bucket['Name'])
|
SalemHarrache/PyVantagePro
|
setup.py
|
Python
|
gpl-3.0
| 2,383
| 0.000839
|
# coding: utf8
'''
PyVantagePro
------------
Communication tools for the Davis VantagePro2 devices.
:copyright: Copyright 2012 Salem Harrache and contributors, see AUTHORS.
:license: GNU GPL v3.
'''
import re
import sys
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = ''
CHANGES = ''
try:
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
except:
pass
REQUIREMENTS = [
'pylink',
'progressbar-latest',
]
if sys.version_info < (2, 7):
REQUIREMENTS.append('ordereddict')
if sys.version_info < (2, 7) or (3,) <= sys.version_info < (3, 2):
# In the stdlib from 2.7:
REQUIREMENTS.append('argparse')
with open(os.path.join(os.path.dirname(__file__), 'pyvantagepro',
'__init__.py')) as init_py:
release = re.search("VERSION = '([^']+)'", init_py.read()).group(1)
# The short X.Y version.
version = release.rstrip('dev')
setup(
name='PyVantagePro',
version=version,
url='https://github.com/SalemHarrache/PyVantagePro',
license='GNU GPL v3',
description='Communication tools for the Davis VantagePro2 devices',
long_description=README + '\n\n' + CHANGES,
author='Salem Harrache',
author_email='salem.harrache@gmail.com',
maintainer='Lionel Darras',
maintainer_email='Lionel.Darras@obs.ujf-grenoble.fr',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
'Topic :: Internet',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=find_packages(),
zip_safe=False,
install_requires=REQUIREMENTS,
test_suite='pyvantagepro.tests',
entry_points={
'console_scripts': [
'pyvantagepro = pyvantagepro.__main__:main'
],
},
)
|
dayatz/taiga-back
|
taiga/base/api/settings.py
|
Python
|
agpl-3.0
| 8,478
| 0.000472
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The code is partially taken (and modified) from django rest framework
# that is licensed under the following terms:
#
# Copyright (c) 2011-2014, Tom Christie
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
For example your project's `settings.py` file might look like this:
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": (
"taiga.base.api.renderers.JSONRenderer",
)
"DEFAULT_PARSER_CLASSES": (
"taiga.base.api.parsers.JSONParser",
)
}
This module provides the `api_setting` object, that is used to access
REST framework settings, checking for user settings first, then falling
back to the defaults.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils import six
import importlib
from . import ISO_8601
USER_SETTINGS = getattr(settings, "REST_FRAMEWORK", None)
DEFAULTS = {
# Base API policies
"DEFAULT_RENDERER_CLASSES": (
"taiga.base.api.renderers.JSONRenderer",
),
"DEFAULT_PARSER_CLASSES": (
"taiga.base.api.parsers.JSONParser",
"taiga.base.api.parsers.FormParser",
"taiga.base.api.parsers.MultiPartParser"
),
"DEFAULT_AUTHENTICATION_CLASSES": (
"taiga.base.api.authentication.SessionAuthentication",
"taiga.base.api.authentication.BasicAuthentication"
),
"DEFAULT_PERMISSION_CLASSES": (
"taiga.base.api.permissions.AllowAny",
),
"DEFAULT_THROTTLE_CLASSES": (
),
"DEFAULT_CONTENT_NEGOTIATION_CLASS":
"taiga.base.api.negotiation.DefaultContentNegotiation",
    # Generic view behavior
"DEFAULT_MODEL_SERIALIZER_CLASS":
"taiga.base.api.serializers.ModelSerializer",
"DEFAULT_MODEL_VALIDATOR_CLASS":
"taiga.base.api.validators.ModelValidator",
"DEFAULT_FILTER_BACKENDS": (),
# Throttling
"DEFAULT_THROTTLE_RATES": {
"user": None,
"anon": None,
},
"DEFAULT_THROTTLE_WHITELIST": [],
# Pagination
"PAGINATE_BY": None,
"PAGINATE_BY_PARAM": None,
"MAX_PAGINATE_BY": None,
# Authentication
"UNAUTHENTICATED_USER": "django.contrib.auth.models.AnonymousUser",
"UNAUTHENTICATED_TOKEN": None,
# View configuration
"VIEW_NAME_FUNCTION": "taiga.base.api.views.get_view_name",
"VIEW_DESCRIPTION_FUNCTION": "taiga.base.api.views.get_view_description",
# Exception handling
"EXCEPTION_HANDLER": "taiga.base.api.views.exception_handler",
# Testing
"TEST_REQUEST_RENDERER_CLASSES": (
"taiga.base.api.renderers.MultiPartRenderer",
"taiga.base.api.renderers.JSONRenderer"
),
"TEST_REQUEST_DEFAULT_FORMAT": "multipart",
# Browser enhancements
"FORM_METHOD_OVERRIDE": "_method",
"FORM_CONTENT_OVERRIDE": "_content",
"FORM_CONTENTTYPE_OVERRIDE": "_content_type",
"URL_ACCEPT_OVERRIDE": "accept",
"URL_FORMAT_OVERRIDE": "format",
"FORMAT_SUFFIX_KWARG": "format",
"URL_FIELD_NAME": "url",
# Input and output formats
"DATE_INPUT_FORMATS": (
ISO_8601,
),
"DATE_FORMAT": ISO_8601,
"DATETIME_INPUT_FORMATS": (
ISO_8601,
),
"DATETIME_FORMAT": None,
"TIME_INPUT_FORMATS": (
ISO_8601,
),
"TIME_FORMAT": None,
# Pending deprecation
"FILTER_BACKEND": None,
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
"DEFAULT_RENDERER_CLASSES",
"DEFAULT_PARSER_CLASSES",
"DEFAULT_AUTHENTICATION_CLASSES",
"DEFAULT_PERMISSION_CLASSES",
"DEFAULT_THROTTLE_CLASSES",
"DEFAULT_CONTENT_NEGOTIATION_CLASS",
"DEFAULT_MODEL_SERIALIZER_CLASS",
"DEFAULT_FILTER_BACKENDS",
"EXCEPTION_HANDLER",
"FILTER_BACKEND",
"TEST_REQUEST_RENDERER_CLASSES",
"UNAUTHENTICATED_USER",
"UNAUTHENTICATED_TOKEN",
"VIEW_NAME_FUNCTION",
"VIEW_DESCRIPTION_FUNCTION"
)
def perform_import(val, setting_name):
"""
If the given setting is a string import notation,
then perform the necessary import or imports.
"""
if isinstance(val, six.string_types):
return import_from_string(val, setting_name)
elif isinstance(val, (list, tuple)):
return [import_from_string(item, setting_name) for item in val]
return val
def import_from_string(val, setting_name):
"""
Attempt to import a class from a string representation.
"""
try:
# Nod to tastypie's use of importlib.
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
module = importlib.import_module(module_path)
return getattr(module, class_name)
except ImportError as e:
msg = "Could not import '%s' for API setting '%s'. %s: %s." % (val, setting_name, e.__class__.__name__, e)
raise ImportError(msg)
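# A usage sketch (the setting name is only illustrative): resolving a dotted
# path to the object it names.
#
#   cls = import_from_string(
#       "taiga.base.api.renderers.JSONRenderer", "DEFAULT_RENDERER_CLASSES")
#   # cls is the JSONRenderer class itself, not the string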
class APISettings(object):
"""
A settings object, that allows API settings to be accessed as properties.
For example:
from taiga.base.api.settings import api_settings
print api_settings.DEFAULT_RENDERER_CLASSES
Any setting with string import paths will be automatically resolved
and return the class, rather than the string literal.
"""
def __init__(self, user_settings=None, defaults=None, import_strings=None):
self.user_settings = user_settings or {}
self.defaults = defaults or {}
self.import_strings = import_strings or ()
def __getattr__(self, attr):
if attr not in self.defaults.keys():
raise AttributeError("Invalid API setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
# Coerce import strings into classes
if val and attr in self.import_strings:
val = perform_import(val, attr)
self.validate_setting(attr, val)
# Cache the res
|
hammerlab/immuno
|
immuno/ui.py
|
Python
|
apache-2.0
| 14,247
| 0.004633
|
from os import environ, getcwd
from os.path import exists, join
from common import str2bool, env_var
from group_epitopes import group_epitopes_dataframe
from hla_file import read_hla_file
from immunogenicity import ImmunogenicityPredictor
from load_file import expand_transcripts
from load_file import load_variants
from mhc_common import (
normalize_hla_allele_name,
mhc_class_from_normalized_allele_name
)
from mhc_netmhcpan import PanBindingPredictor
from mhc_netmhccons import ConsensusBindingPredictor
import mhc_random
from mhc_iedb import IEDBMHC1Binding
from peptide_binding_measure import IC50_FIELD_NAME, PERCENTILE_RANK_FIELD_NAME
from vcf import load_vcf
from flask import Flask
from flask import (redirect, request, render_template, url_for,
send_from_directory, flash, make_response)
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.user import (current_user, login_required, UserManager,
UserMixin, SQLAlchemyAdapter)
from flask_mail import Mail, Message
from flask.ext.wtf import Form
from flask.ext.wtf.file import FileField, FileRequired, FileAllowed
from jinja2 import ChoiceLoader, FileSystemLoader
from json import loads, dumps
from natsort import natsorted
from pandas import DataFrame, Series, concat, merge
from werkzeug import secure_filename
from wtforms import (SubmitField, TextField, TextAreaField, SelectField,
validators)
class ConfigClass(object):
# Custom config
DEBUG = env_var('IMMUNO_DEBUG', str2bool, False)
PORT = env_var('IMMUNO_PORT', int, 5000)
USE_RELOADER = env_var('IMMUNO_USE_RELOADER', str2bool, False)
UPLOAD_FOLDER = join(getcwd(), 'uploads')
# Flask config
SECRET_KEY = environ.get('IMMUNO_SECRET_KEY')
assert SECRET_KEY, \
"Environment variable IMMUNO_SECRET_KEY must be set"
SQLALCHEMY_DATABASE_URI = environ.get('IMMUNO_DB')
assert SQLALCHEMY_DATABASE_URI, \
"Environment variable IMMUNO_DB must be set"
# Flask-User config
USER_PRODUCT_NAME = 'immuno'
USER_ENABLE_EMAIL = True
USER_ENABLE_CHANGE_PASSWORD = True
USER_ENABLE_CHANGE_USERNAME = False
USER_ENABLE_CONFIRM_EMAIL = True
USER_ENABLE_FORGOT_PASSWORD = True
USER_ENABLE_MULTIPLE_EMAILS = False
USER_ENABLE_REGISTRATION = True
    USER_ENABLE_RETYPE_PASSWORD = True
USER_ENABLE_USERNAME = False
USER_CONFIRM_EMAIL_EXPIRATION = 2 * 24 * 3600
USER_PASSWORD_HASH = 'bcrypt'
USER_PASSWORD_HASH_MODE = 'passlib'
USER_REQUIRE_INVITATION = False
USER_RESET_PASSWORD_EXPIRATION = 2 * 24 * 3600
USER_SEND_PASSWORD_CHANGED_EMAIL = True
    USER_SEND_REGISTERED_EMAIL = True
USER_SEND_USERNAME_CHANGED_EMAIL = False
# Flask-Mail config
MAIL_SERVER = environ.get('IMMUNO_MAIL_SERVER')
assert MAIL_SERVER, \
"Environment variable IMMUNO_MAIL_SERVER must be set"
MAIL_PORT = env_var('IMMUNO_MAIL_PORT', int, 5000)
MAIL_USE_SSL = env_var('IMMUNO_MAIL_USE_SSL', str2bool, False)
MAIL_USE_TLS = env_var('IMMUNO_MAIL_USE_TLS', str2bool, False)
MAIL_USERNAME = environ.get('IMMUNO_MAIL_USERNAME')
assert MAIL_USERNAME, \
"Environment variable IMMUNO_MAIL_USERNAME must be set"
MAIL_PASSWORD = environ.get('IMMUNO_MAIL_PASSWORD')
assert MAIL_PASSWORD, \
"Environment variable IMMUNO_MAIL_PASSWORD must be set"
MAIL_DEFAULT_SENDER = environ.get('IMMUNO_MAIL_DEFAULT_SENDER')
assert MAIL_DEFAULT_SENDER, \
"Environment variable IMMUNO_MAIL_DEFAULT_SENDER must be set"
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
mail = Mail()
mail.init_app(app)
db = SQLAlchemy(app)
print 'DB: %s' % db
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
active = db.Column(db.Boolean(), nullable=False, default=False)
password = db.Column(db.String(255), nullable=False, default='')
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
reset_password_token = db.Column(db.String(100), nullable=False, default='')
class Patient(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
display_id = db.Column(db.String(1000), nullable=False, unique=True)
class Variant(db.Model):
id = db.Column(db.Integer, primary_key=True)
patient_id = db.Column(db.Integer, db.ForeignKey('patient.id'))
chr = db.Column(db.String(255), nullable=False)
pos = db.Column(db.Integer, nullable=False)
ref = db.Column(db.String(1000), nullable=True)
alt = db.Column(db.String(1000), nullable=False)
def __init__(self, patient_id, chr, pos, ref, alt):
self.patient_id = patient_id
self.chr = chr
self.pos = pos
self.ref = ref
self.alt = alt
class HLAType(db.Model):
id = db.Column(db.Integer, primary_key=True)
patient_id = db.Column(db.Integer, db.ForeignKey('patient.id'))
allele = db.Column(db.String(15), nullable=False)
mhc_class = db.Column(db.SmallInteger, nullable=False)
def __init__(self, patient_id, allele, mhc_class):
self.patient_id = patient_id
self.allele = allele
self.mhc_class = mhc_class
class Run(db.Model):
id = db.Column(db.Integer, primary_key=True)
patient_id = db.Column(db.Integer, db.ForeignKey('patient.id'))
output = db.Column(db.Text, nullable=False)
def __init__(self, patient_id, output):
self.patient_id = patient_id
self.output = output
db_adapter = SQLAlchemyAdapter(db, User)
user_manager = UserManager(db_adapter, app)
@app.route('/')
def patients():
if current_user.is_authenticated():
return redirect(url_for('profile'))
else:
return redirect(url_for('user.login'))
@app.route('/profile')
@login_required
def profile():
patients = Patient.query.with_entities(Patient.display_id).filter_by(
user_id=current_user.id).all()
return render_template(
'profile.html',
patients=patients)
def get_vcf_df(patient_id):
variants = Variant.query.with_entities(Variant.chr, Variant.pos,
Variant.ref, Variant.alt).filter_by(patient_id=patient_id).all()
vcf_df = DataFrame(variants, columns=['chr', 'pos', 'ref', 'alt'])
# TODO: I added this because downstream functions expect 'info', but this
# is silly and hacky.
vcf_df['info'] = Series([None] * len(vcf_df))
return vcf_df
def run_pipeline(patient_id, score_epitopes):
"""Run the pipeline for this patient, and save the output to the DB as a
Run."""
hla_types = HLAType.query.with_entities(HLAType.allele,
HLAType.mhc_class).filter_by(patient_id=patient_id).all()
peptide_length = 31
alleles = [normalize_hla_allele_name(
allele) for allele, mhc_class in hla_types]
vcf_df = get_vcf_df(patient_id)
transcripts_df, vcf_df, variant_report = expand_transcripts(
vcf_df,
patient_id,
min_peptide_length = peptide_length,
max_peptide_length = peptide_length)
scored_epitopes = score_epitopes(transcripts_df, alleles)
imm = ImmunogenicityPredictor(alleles=alleles)
scored_epitopes = imm.predict(scored_epitopes)
# TODO(tavi) Make this expansion more robust. It breaks the IEDB predictor,
# for example.
short_transcripts_df = transcripts_df[['chr', 'pos', 'ref',
'alt', 'TranscriptId']]
scored_epitopes = merge(scored_epitopes, short_transcripts_df,
on='TranscriptId', how='left')
peptides = group_epitopes_dataframe(
scored_epitopes, use_transcript_name = True)
run = Run(patient_id=patient_id, output=dumps(peptides))
db.session.add(run)
@app.route('/patient/<display_id>')
@login_required
def patient(display_id):
patient_id, display_id = Patient.query.with_entities(Patient.id,
Patient.display_id).filter_by(display_id=display_id).one()
output = Run.query.with_entities(Run.output).filter_by(
patient_id=patient_id).one()
return render_template('patient.html',
display_id=display_id,
peptides=loads(output[0]),
IC50_FIELD_NAME=IC
|
keyru/hdl-make
|
tests/counter/syn/proasic3_sk_libero/verilog/Manifest.py
|
Python
|
gpl-3.0
| 242
| 0.016529
|
target = "microsemi"
action = "synthesis"
syn_device = "a3p250"
syn_grade = "-2"
syn_package = "208 pqfp"
syn_top = "proasic3_top"
syn_project = "demo"
syn_tool = "libero"
modules = {
"local" : [ "../../../top/proasic3_sk/verilog" ],
}
|
hdzierz/Kaka
|
gene_expression/models.py
|
Python
|
gpl-2.0
| 644
| 0.001553
|
import mongoengine
from mongcore.models import Feature, Species
from jsonfield import JSONField
# Create your models here.
class Target(Feature):
species = mongoengine.ReferenceField(Species, default=1)
kea_id = mongoengine.StringField(max_length=255)
ebrida_id = mongoengine.StringField(max_length=255)
file_name = mongoengine.StringField()
column = mongoengine.StringField(max_length=255)
condition = mongoengine.StringField(max_length=255)
    lib_type = mongoengine.StringField(max_length=255)
class Gene(Feature):
gene_id = mongoengine.StringField(max_length=255)
length = mongoengine.IntField()
|
houshengbo/nova_vmware_compute_driver
|
nova/tests/baremetal/db/test_bm_interface.py
|
Python
|
apache-2.0
| 2,308
| 0.000433
|
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-metal DB testcase for BareMetalInterface
"""
from nova import exception
from nova.tests.baremetal.db import base
from nova.virt.baremetal import db
class BareMetalInterfaceTestCase(base.BMDBTestCase):
def test_unique_address(self):
        pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
db.bm_interface_destroy(self.context, pif1_id)
pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
                                         '0x2', 2)
self.assertTrue(pif2_id is not None)
def test_unique_vif_uuid(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
'0x2', 2)
db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif2_id, 'AAAA')
def test_vif_not_found(self):
pif_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif_id + 1, 'AAAA')
|
DailyActie/Surrogate-Model
|
01-codes/numpy-master/numpy/polynomial/tests/test_hermite_e.py
|
Python
|
mit
| 18,726
| 0
|
"""Tests for hermite_e module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
He0 = np.array([1])
He1 = np.array([0, 1])
He2 = np.array([-1, 0, 1])
He3 = np.array([0, -3, 0, 1])
He4 = np.array([3, 0, -6, 0, 1])
He5 = np.array([0, 15, 0, -10, 0, 1])
He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x):
return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase):
    def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
def test_hermezero(self):
assert_equal(herme.hermezero, [0])
def test_hermeone(self):
assert_equal(herme.hermeone, [1])
def test_hermex(self):
assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%
|
d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0] * i + [1], [0] * j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0, 1])
for i in range(1, 5):
ser = [0] * i + [1]
tgt = [0] * (i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self):
# check values of result
for i in range(5):
pol1 = [0] * i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0] * j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1 * val2, err_msg=msg)
def test_hermediv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0] * i + [1]
cj = [0] * j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self):
# check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
        # check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herme.hermeval(x, [0] * i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
# test values
tgt = y1 * y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
# test values
tgt = y1 * y2 * y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3) * 2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3) * 3)
class TestIntegral(TestCase):
def test_hermeint(self):
# check exceptions
assert_raises(ValueError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0] * (i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [1 / scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0] * i + [1]
tgt = [i] + [0] * i + [2 / scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0] * i + [1]
tgt = pol[:]
for k in range(j):
|
anthonyclays/pyAPT
|
set_velocity_params.py
|
Python
|
mit
| 1,322
| 0.020424
|
#!/usr/bin/env python
"""
Usage: python set_velocity_params.py <acceleration (mm/s/s)> <max velocity (mm/s)> [<serial>]
Sets the velocity parameters of all APT controllers, or of the one specified
"""
import pylibftdi
import pyAPT
def set_vel_params(serial, acc, max_vel):
with pyAPT.MTS50(serial_number=serial) as con:
print '\tSetting new velocity parameters',acc,max_vel
con.set_velocity_parameters(acc, max_vel)
min_vel, acc, max_vel = con.velocity_parameters()
print '\tNew velocity parameters:'
print '\t\tMin. Velocity: %.2fmm'%(min_vel)
print '\t\tAcceleration: %.2fmm'%(acc)
print '\t\tMax. Velocity: %.2fmm'%(max_vel)
def main(args):
if len(args)<3:
print __doc__
return 1
acc = float(args[1])
max_vel = flo
|
at(args[2])
if
|
len(args)>3:
serial = args[3]
else:
serial = None
if serial:
set_vel_params(serial, acc, max_vel)
return 0
else:
print 'Looking for APT controllers'
drv = pylibftdi.Driver()
controllers = drv.list_devices()
if controllers:
for con in controllers:
print 'Found %s %s S/N: %s'%con
set_vel_params(con[2], acc, max_vel)
return 0
else:
print '\tNo APT controllers found. Maybe you need to specify a PID'
return 1
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
mitnk/letsencrypt
|
letsencrypt/errors.py
|
Python
|
apache-2.0
| 2,342
| 0.000854
|
"""Let's Encrypt client errors."""
class Error(Exception):
"""Generic Let's Encrypt client error."""
class AccountStorageError(Error):
"""Generic `.AccountStorage` error."""
class AccountNotFound(AccountStorageError):
"""Account not found error."""
class ReverterError(Error):
"""Let's Encrypt Reverter error."""
class SubprocessError(Error):
"""Subprocess handling error."""
class CertStorageError(Error):
"""Generic `.CertStorage` error."""
class HookCommandNotFound(Error):
"""Failed to find a hook command in the PATH."""
# Auth Handler Errors
class AuthorizationError(Error):
"""Authorization error."""
class FailedChallenges(AuthorizationError):
"""Failed challenges error.
:ivar set failed_achalls: Failed `.AnnotatedChallenge` instances.
"""
def __init__(self, failed_achalls):
assert failed_achalls
self.failed_achalls = failed_achalls
super(FailedChallenges, self).__init__()
def __str__(self):
return "Failed authorization procedure. {0}".format(
", ".join(
"{0} ({1}): {2}".format(achall.domain, achall.typ, achall.error)
for achall in self.failed_achalls if achall.error is not None))
# Plugin Errors
class PluginError(Error):
"""Let's Encrypt Plugin error."""
class PluginEnhancementAlreadyPresent(Error):
""" Enhancement was already set """
class PluginSelectionError(Error):
"""A problem with plugin/configurator selection or setup"""
class NoInstallationError(PluginError):
"""Let's Encrypt No Installation error."""
class MisconfigurationError(PluginError):
"""Let's Encrypt Misconfiguration error."""
class NotSupportedError(PluginError):
"""Let's Encrypt Plugin function not supported error."""
class StandaloneBindError(Error):
|
"""Standalone plugin bind error."""
def __init__(self, socket_error, port):
super(StandaloneBindError, self).__init__(
"Problem binding to port {0}: {1}".format(port, socket_error))
self.socket_error = socket_error
self.port = port
|
class ConfigurationError(Error):
"""Configuration sanity error."""
# NoninteractiveDisplay iDisplay plugin error:
class MissingCommandlineFlag(Error):
"""A command line argument was missing in noninteractive usage"""
|
RobotTurtles/mid-level-routines
|
Apps/TurtleCommands.py
|
Python
|
apache-2.0
| 1,101
| 0.006358
|
__author__ = 'Alex'
from Movement import Movement
class BaseCommand:
def __init__(self, movement):
|
assert isinstance(movement, Movement)
self.name = 'unknown'
self.m = movement
    def execute(self): pass
class Forward(BaseCommand):
def __init__(self, movement):
|
assert isinstance(movement, Movement)
self.name = 'forward'
self.m = movement
def execute(self):
self.m.moveCM(10)
class Reverse(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'reverse'
self.m = movement
def execute(self):
        self.m.moveCM(-10)  # negative distance: reverse moves backwards
class Left(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'left'
self.m = movement
def execute(self):
self.m.turnDegrees(-90)
class Right(BaseCommand):
def __init__(self, movement):
assert isinstance(movement, Movement)
self.name = 'right'
self.m = movement
def execute(self):
self.m.turnDegrees(90)
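# Editor's usage sketch (assumes a concrete Movement implementation):
#
#     m = Movement()
#     program = [Forward(m), Left(m), Forward(m), Right(m)]
#     for command in program:
#         command.execute()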
|
ansible/ansible-lint
|
src/ansiblelint/rules/MercurialHasRevisionRule.py
|
Python
|
mit
| 1,951
| 0
|
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from typing import TYPE_CHECKING, Any, Dict, Union
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from typing import Optional
from ansiblelint.file_utils import Lintable
class MercurialHasRevisionRule(AnsibleLintRule):
id = 'hg-latest'
    shortdesc = 'Mercurial checkouts must contain explicit revision'
description = (
'All version control checkouts must point to '
'an explicit commit or tag, not just ``latest``'
)
severity = 'MEDIUM'
tags = ['idempotency']
version_added = 'historic'
def matchtask(
self, task: Dict[str, Any], file: 'Optional[Lintable]' = None
) -> Union[bool, str]:
return bool(
task['action']['__ansible_module__'] == 'hg'
and task['action'].get('revision', 'default') == 'default'
)
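# Editor's sketch of the task shape matchtask receives: a normalized task like
#     {'action': {'__ansible_module__': 'hg', 'repo': 'https://example.com/repo'}}
# matches (revision falls back to 'default'), while the same task with
# 'revision': '4a3b2c1' set does not.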
|
AlexandroPQC/django_discusion
|
SistemaDiscusiones/home/urls.py
|
Python
|
gpl-3.0
| 118
| 0
|
from django.conf.urls import url
from .views import IndexView
urlpatterns = [
url(r'^$', IndexView.as_view()),
]
|
c4fcm/DataBasic
|
databasic/logic/tfidfanalysis.py
|
Python
|
mit
| 4,459
| 0.01054
|
import os
import codecs, re, time, string, logging, math
from operator import itemgetter
from nltk import FreqDist
from nltk.corpus import stopwords
import textmining
from scipy import spatial
from . import filehandler
def most_frequent_terms(*args):
tdm = textmining.TermDocumentMatrix(simple_tokenize_remove_our_stopwords)
for doc in args:
tdm.add_doc(doc)
freqs = []
for d in tdm.sparse:
f = [(freq, name) for (name, freq) in list(d.items())]
f.sort(reverse=True)
freqs.append(f)
return freqs
def doc_to_words(document):
'''
Turn a document into a list of all the words in it
# TODO: include word stemming
'''
t1 = time.time()
words = re.findall(r"[\w']+|[.,!?;]", document, re.UNICODE)
t2 = time.time()
words = [w.lower() for w in words]
t3 = time.time()
words = [w for w in words if not w in string.punctuation]
t4 = time.time()
logging.debug(" tokenize: %d" % (t2-t1))
logging.debug(" ignore_case: %d" % (t3-t2))
logging.debug(" remove punctuation: %d" % (t4-t3))
return words
# TODO add a language param to remove Spanish stop words too
def term_frequency(words):
'''
Turn a list of words into a NLTK frequency distribution object
'''
t1 = time.time()
fdist = FreqDist(words)
# remove stopwords here rather than in corpus text for speed
# http://stackoverflow.com/questions/7154312/how-do-i-remove-entries-within-a-counter-object-with-a-loop-without-invoking-a-r
for w in list(fdist):
if w in stopwords.words('english'):
del fdist[w]
t2 = time.time()
logging.debug(" create term freq: %d" % (t2-t1))
return fdist
def _count_incidence(lookup, term):
if term in lookup:
lookup[term] += 1
else:
lookup[term] = 1
def inverse_document_frequency(list_of_fdist_objects):
'''
Turn a list of words lists into a document frequency
'''
doc_count = len(list_of_fdist_objects)
term_doc_incidence = {}
t1 = time.time()
[_count_incidence(term_doc_incidence,term) \
for fdist in list_of_fdist_objects \
for term in list(fdist.keys()) ]
t2 = time.time()
idf = { term: math.log(float(doc_count)/float(incidence)) for term, incidence in term_doc_incidence.items() }
t3 = time.time()
logging.debug(" create df: %d" % (t2-t1))
logging.debug(" create idf: %d" % (t3-t2))
return idf
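# Editor's worked example: with 4 documents, a term appearing in only one of
# them gets idf = log(4/1) ~= 1.39, while a term appearing in all four gets
# idf = log(4/4) = 0 -- it carries no discriminating power.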
def tf_idf(list_of_file_paths):
'''
Compute and return tf-idf from a list of file paths (sorted by tfidf desc)
'''
doc_list = [ filehandler.convert_to_txt(file_path) for file_path in list_of_file_paths ]
tf_list = [ term_frequency( doc_to_words(doc) ) for doc in doc_list ] # a list of FreqDist objects
idf = inverse_document_frequency(tf_list)
tf_idf_list = [ [{'term':term, 'tfidf':frequency*idf[term], 'frequency': frequency} for term, frequency in tf.items()] for tf in tf_list ]
tf_idf_list = [ sorted(tf_idf, key=itemgetter('tfidf'), reverse=True) for tf_idf in tf_idf_list ]
return tf_idf_list
def simple_tokenize_remove_our_stopwords(document):
"""
Clean up a document and split into a list of words, removing stopwords.
Converts document (a string) to lowercase and strips out everything
which is not a lowercase letter. Then removes stopwords.
"""
document = document.lower()
document = re.sub('[^a-z\']', ' ', document)
words = document.strip().split()
# Remove stopwords
words = [word for word in words if word not in stopwords.words('english')]
return words
def cosine_similarity(list_of_file_paths):
    # Convert each input file to plain text
doc_list = [ filehandler.convert_to_txt(file_path) for file_path in list_of_file_paths ]
# Initialize class to create term-document matrix
tdm = textmining.TermDocumentMatrix(tokenizer=simple_tokenize_remove_our_stopwords)
for doc in doc_list:
tdm.add_doc(doc)
results = []
is_first_row1 = True
|
for row1 in tdm.rows(cutoff=1):
if is_first_row1:
is_first_row1 = False
continue
is_first_row2 = True
cols = []
for row2 in tdm.rows(cutoff=1):
if is_first_row2:
is_first_row2 = False
continue
cols.append( 1 - spatial.distance.cosine(row1,row2) )
results.append(cols)
return results
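# Editor's note: 1 - cosine distance yields similarity in [0, 1] for these
# non-negative count vectors; the result is a symmetric matrix with 1.0 on
# the diagonal, e.g. (hypothetical paths):
#     sims = cosine_similarity(['a.txt', 'b.txt'])
#     sims[0][0]   # -> 1.0, every document matches itself exactly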
|
maheshp212/find-to-run
|
findtorun/findtorun/settings.py
|
Python
|
apache-2.0
| 3,920
| 0.001276
|
"""
Django settings for findtorun project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3@i55e)e-m8af#@st3n98!$64fe-3ti-6o=j5g*k%3n6ri9yx!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
log_level = 'DEBUG'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'formatters': {
'simple': {
'format': '%(filename)s %(lineno)d %(asctime)s %(levelname)s %(message)s'
}
},
'loggers': {
'find2run': {
'handlers': ['console'],
'level': log_level,
},
},
}
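# With the config above, application code logs to the console through the
# 'find2run' logger (editor's sketch):
#     import logging
#     logger = logging.getLogger('find2run')
#     logger.debug('visible because log_level is DEBUG')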
ALLOWED_HOSTS = [
'ec2-54-193-111-20.us-west-1.compute.amazonaws.com',
'localhost',
'api.findtorun.fun'
]
# Application definition
INSTALLED_APPS = [
'find2run.apps.Find2RunConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'findtorun.urls'
TEMPLATES = [
{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'findtorun.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'findtorun',
'USER': 'findtorun_user',
'PASSWORD': 'p@$$w0rd111!!!',
'HOST': 'localhost',
'PORT': '5432',
'TEST': {
'NAME': 'findtorun_test'
},
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
philipgian/pre-commit
|
pre_commit/output.py
|
Python
|
mit
| 2,217
| 0
|
from __future__ import unicode_literals
import sys
from pre_commit import color
from pre_commit import five
def get_hook_message(
start,
postfix='',
end_msg=None,
end_len=0,
end_color=None,
use_color=None,
cols=80,
):
"""Prints a message for running a hook.
This currently supports three approaches:
# Print `start` followed by dots, leaving 6 characters at the end
>>> print_hook_message('start', end_len=6)
start...............................................................
# Print `start` followed by dots with the end message colored if coloring
# is specified and a newline afterwards
>>> print_hook_message(
'start',
end_msg='end',
end_color=color.RED,
use_color=True,
)
start...................................................................end
# Print `start` followed by dots, followed by the `postfix` message
# uncolored, followed by the `end_msg` colored if specified and a newline
# afterwards
>>> print_hook_message(
'start',
postfix='postfix ',
end_msg='end',
|
end_color=color.RED,
use_color=True,
)
start...........................................................postfix end
"""
if bool(end_msg) == bool(end_len):
raise ValueError('Expected one of (`end_msg`, `end_len`)')
if end_msg is not None and (end_color is None or use_color is None):
raise ValueError(
'`end_color` and `use_color` are required with `end_msg`'
)
if end_len:
return start + '.' * (cols - len(start) - end_len - 1)
|
else:
return '{}{}{}{}\n'.format(
start,
'.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
postfix,
color.format_color(end_msg, end_color, use_color),
)
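# Editor's note: despite the doctest-style examples above naming
# print_hook_message, this function returns the formatted string; callers
# pair it with write()/write_line() below, e.g. (hypothetical hook name):
#     write(get_hook_message('flake8', end_len=6))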
stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
def write(s, stream=stdout_byte_stream):
stream.write(five.to_bytes(s))
stream.flush()
def write_line(s=None, stream=stdout_byte_stream):
if s is not None:
stream.write(five.to_bytes(s))
stream.write(b'\n')
stream.flush()
|