Dataset schema (per-row columns and observed ranges):

| column | type | range |
|---|---|---|
| repo_name | string | 5–100 chars |
| path | string | 4–231 chars |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | 0–8.16k chars |
| middle | string | 3–512 chars |
| suffix | string | 0–8.17k chars |

Each record below lists repo_name | path | language | license | size | score, followed by the file content with the prefix, middle, and suffix spans rejoined.
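Because each row stores one file split into `prefix`, `middle`, and `suffix` spans (a fill-in-the-middle layout), a row can be rejoined into the original file text. A minimal sketch, assuming rows behave like plain dicts keyed by the column names above (`reassemble` is an illustrative helper, not part of the dataset):

```python
def reassemble(row):
    """Rejoin the fill-in-the-middle spans into the complete source file."""
    return row["prefix"] + row["middle"] + row["suffix"]
```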

uclouvain/OSIS-Louvain | base/tests/factories/group_element_year.py | Python | agpl-3.0 | 2,912 | 0.002061
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
import operator
import string
import random
import factory.fuzzy
from base.models.enums.quadrimesters import DerogationQuadrimester
from base.tests.factories.utils.fuzzy import FuzzyBoolean
from program_management.tests.factories.element import ElementGroupYearFactory, ElementLearningUnitYearFactory
def _generate_block_value():
"""Generate a random string composed of digit between 1 and 6 included.
Each digit can be represented at most once in the string and they are sorted from smallest to greatest.
Ex: "", "156", "2", "456" and so on
"""
population = list(range(1, 7))
k = random.randint(0, len(population))
sample = random.sample(population, k)
sample.sort()
return int("".join([str(element) for element in sample])) if sample else None
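# Editor's sketch (hypothetical check, not part of the original module): every
# non-None value has strictly increasing digits drawn from 1-6, e.g. 156 or 2456.
# for _ in range(100):
#     value = _generate_block_value()
#     if value is not None:
#         digits = str(value)
#         assert list(digits) == sorted(set(digits)) and set(digits) <= set("123456")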
class GroupElementYearFactory(factory.django.DjangoModelFactory):
class Meta:
model = "base.GroupElementYear"
django_get_or_create = ('parent_element', 'child_element')
external_id = factory.fuzzy.FuzzyText(length=10, chars=string.digits)
changed = factory.fuzzy.FuzzyNaiveDateTime(datetime.datetime(2016, 1, 1), datetime.datetime(2017, 3, 1))
parent_element = factory.SubFactory(ElementGroupYearFactory)
child_element = factory.SubFactory(ElementGroupYearFactory)
relative_credits = factory.fuzzy.FuzzyInteger(0, 10)
is_mandatory = FuzzyBoolean()
link_type = None
order = None
block = factory.LazyFunction(_generate_block_value)
class GroupElementYearChildLeafFactory(GroupElementYearFactory):
child_element = factory.SubFactory(ElementLearningUnitYearFactory)

chemlab/chemlab | chemlab/core/spacegroup/crystal.py | Python | gpl-3.0 | 7,982 | 0.003884
# Adapted from ASE https://wiki.fysik.dtu.dk/ase/
#
#
# Copyright (C) 2010, Jesper Friis
# (see accompanying license files for details).
"""
A module for chemlab for simple creation of crystalline structures from
knowledge of the space group.
"""
import numpy as np
from collections import Counter
from .spacegroup import Spacegroup
from ..system import System
from .cell import cellpar_to_cell
__all__ = ['crystal']
def crystal(positions, molecules, group,
cellpar=[1.0, 1.0, 1.0, 90, 90, 90], repetitions=[1, 1, 1]):
'''Build a crystal from atomic positions, space group and cell
parameters.
**Parameters**
positions: list of coordinates
A list of the atomic positions
molecules: list of Molecule
The molecules corresponding to the positions, the molecule will be
translated in all the equivalent positions.
group: int | str
Space group given either as its number in International Tables
or as its Hermann-Mauguin symbol.
repetitions:
Repetition of the unit cell in each direction
cellpar:
Unit cell parameters
This function was taken and adapted from the *spacegroup* module
found in `ASE <https://wiki.fysik.dtu.dk/ase/>`_.
The *spacegroup* module was originally developed by Jesper
Friis.
'''
sp = Spacegroup(group)
sites, kind = sp.equivalent_sites(positions)
nx, ny, nz = repetitions
reptot = nx*ny*nz
# Unit cell parameters
a,b,c = cellpar_to_cell(cellpar)
cry = System()
i = 0
with cry.batch() as batch:
for x in range(nx):
for y in range(ny):
for z in range(nz):
for s, ki in zip(sites, kind):
tpl = molecules[ki]
tpl.move_to(s[0]*a +s[1]*b + s[2]*c + a*x + b*y + c*z)
batch.append(tpl.copy())
# Computing the box_vectors
cry.box_vectors = np.array([a*nx, b*ny, c*nz])
return cry
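# A hedged usage sketch (editor's illustration; assumes chemlab's documented
# Atom/Molecule API): rock-salt NaCl from the two unique sites of group 225.
# from chemlab.core import Atom, Molecule
# na = Molecule([Atom('Na', [0.0, 0.0, 0.0])])
# cl = Molecule([Atom('Cl', [0.0, 0.0, 0.0])])
# nacl = crystal([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]], [na, cl], 225,
#                cellpar=[5.64, 5.64, 5.64, 90, 90, 90], repetitions=[1, 1, 1])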
# def crystal(symbols=None, basis=None, spacegroup=1, setting=1,
# cell=None, cellpar=None,
# ab_normal=(0,0,1), a_direction=None, size=(1,1,1),
# ondublicates='warn', symprec=0.001,
# pbc=True, primitive_cell=False, **kwargs):
# """Create a System instance for a conventional unit cell of a
# space group.
# Parameters:
# symbols : str | sequence of str | sequence of Atom | Atoms
# Element symbols of the unique sites. Can either be a string
# formula or a sequence of element symbols. E.g. ('Na', 'Cl')
# and 'NaCl' are equivalent. Can also be given as a sequence of
# Atom objects or an Atoms object.
# basis : list of scaled coordinates
# Positions of the unique sites corresponding to symbols given
# either as scaled positions or through an atoms instance. Not
# needed if *symbols* is a sequence of Atom objects or an Atoms
# object.
# spacegroup : int | string | Spacegroup instance
# Space group given either as its number in International Tables
# or as its Hermann-Mauguin symbol.
# setting : 1 | 2
# Space group setting.
# cell : 3x3 matrix
# Unit cell vectors.
# cellpar : [a, b, c, alpha, beta, gamma]
# Cell parameters with angles in degree. Is not used when `cell`
# is given.
# ab_normal : vector
# Is used to define the orientation of the unit cell relative
# to the Cartesian system when `cell` is not given. It is the
# normal vector of the plane spanned by a and b.
# a_direction : vector
# Defines the orientation of the unit cell a vector. a will be
# parallel to the projection of `a_direction` onto the a-b plane.
# size : 3 positive integers
# How many times the conventional unit cell should be repeated
# in each direction.
# ondublicates : 'keep' | 'replace' | 'warn' | 'error'
# Action if `basis` contain symmetry-equivalent positions:
# 'keep' - ignore additional symmetry-equivalent positions
# 'replace' - replace
# 'warn' - like 'keep', but issue an UserWarning
# 'error' - raises a SpacegroupValueError
# symprec : float
# Minimum "distance" between two sites in scaled coordinates
# before they are counted as the same site.
# pbc : one or three bools
# Periodic boundary conditions flags. Examples: True,
# False, 0, 1, (1, 1, 0), (True, False, False). Default
# is True.
# primitive_cell : bool
# Whether to return the primitive instead of the conventional
# unit cell.
# Keyword arguments:
# All additional keyword arguments are passed on to the Atoms
# constructor. Currently, probably the most useful additional
# keyword arguments are `info`, `constraint` and `calculator`.
# Examples:
# Two diamond unit cells (space group number 227)
# >>> diamond = crystal('C', [(0,0,0)], spacegroup=227,
# ... cellpar=[3.57, 3.57, 3.57, 90, 90, 90], size=(2,1,1))
# >>> ase.view(diamond) # doctest: +SKIP
# A CoSb3 skutterudite unit cell containing 32 atoms
# >>> skutterudite = crystal(('Co', 'Sb'),
# ... basis=[(0.25,0.25,0.25), (0.0, 0.335, 0.158)],
# ... spacegroup=204, cellpar=[9.04, 9.04, 9.04, 90, 90, 90])
# >>> len(skutterudite)
# 32
# """
# sg = Spacegroup(spacegroup, setting)
# if (not isinstance(symbols, str) and
# hasattr(symbols, '__getitem__') and
# len(symbols) > 0 and
# isinstance(symbols[0], ase.Atom)):
# symbols = ase.Atoms(symbols)
# if isinstance(symbols, ase.Atoms):
# basis = symbols
# symbols = basis.get_chemical_symbols()
# if isinstance(basis, ase.Atoms):
# basis_coords = basis.get_scaled_positions()
# if cell is None and cellpar is None:
# cell = basis.cell
# if symbols is None:
# symbols = basis.get_chemical_symbols()
# else:
# basis_coords = np.array(basis, dtype=float, copy=False, ndmin=2)
# sites, kinds = sg.equivalent_sites(basis_coords,
# ondublicates=ondublicates,
# symprec=symprec)
# symbols = parse_symbols(symbols)
# symbols = [symbols[i] for i in kinds]
# if cell is None:
# cell = cellpar_to_cell(cellpar, ab_normal, a_direction)
# info = dict(spacegroup=sg)
# if primitive_cell:
# info['unit_cell'] = 'primitive'
# else:
# info['unit_cell'] = 'conventional'
# if 'info' in kwargs:
# info.update(kwargs['info'])
# kwargs['info'] = info
# atoms = ase.Atoms(symbols,
# scaled_positions=sites,
# cell=cell,
# pbc=pbc,
# **kwargs)
# if isinstance(basis, ase.Atoms):
# for name in basis.arrays:
# if not atoms.has(name):
# array = basis.get_array(name)
# atoms.new_array(name, [array[i] for i in kinds],
# dtype=array.dtype, shape=array.shape[1:])
# if primitive_cell:
# from ase.utils.geometry import cut
# prim_cell = sg.scaled_primitive_cell
# atoms = cut(atoms, a=prim_cell[0], b=prim_cell[1], c=prim_cell[2])
# if size != (1, 1, 1):
# atoms = atoms.repeat(size)
# return atoms
# def parse_symbols(symbols):
# """Return `sumbols` as a sequence of element symbols."""
# if isinstance(symbols, basestring):
# symbols = string2symbols(symbols)
# return symbols
#-----------------------------------------------------------------
# Self test
if __name__ == '__main__':
import doctest
#print 'doctest: ', doctest.testmod()

DarkPurpleShadow/ConnectFour | urwid/lcd_display.py | Python | bsd-3-clause | 16,440 | 0.003343
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid LCD display module
# Copyright (C) 2010 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from .display_common import BaseScreen
import time
class LCDScreen(BaseScreen):
def set_terminal_properties(self, colors=None, bright_is_bold=None,
has_underline=None):
pass
def set_mouse_tracking(self, enable=True):
pass
def start(self):
pass
def stop(self):
pass
def set_input_timeouts(self, *args):
pass
def reset_default_terminal_palette(self, *args):
pass
def run_wrapper(self,fn):
return fn()
def draw_screen(self, size, r):
# 2to3 placeholder renamed: size is the (cols, rows) tuple
(cols, rows) = size
pass
def clear(self):
pass
def get_cols_rows(self):
return self.DISPLAY_SIZE
class CFLCDScreen(LCDScreen):
"""
Common methods for Crystal Fontz LCD displays
"""
KEYS = [None, # no key with code 0
'up_press', 'down_press', 'left_press',
'right_press', 'enter_press', 'exit_press',
'up_release', 'down_release', 'left_release',
'right_release', 'enter_release', 'exit_release',
'ul_press', 'ur_press', 'll_press', 'lr_press',
'ul_release', 'ur_release', 'll_release', 'lr_release']
CMD_PING = 0
CMD_VERSION = 1
CMD_CLEAR = 6
CMD_CGRAM = 9
CMD_CURSOR_POSITION = 11 # data = [col, row]
CMD_CURSOR_STYLE = 12 # data = [style (0-4)]
CMD_LCD_CONTRAST = 13 # data = [contrast (0-255)]
CMD_BACKLIGHT = 14 # data = [power (0-100)]
CMD_LCD_DATA = 31 # data = [col, row] + text
CMD_GPO = 34 # data = [pin(0-12), value(0-100)]
# sent from device
CMD_KEY_ACTIVITY = 0x80
CMD_ACK = 0x40 # in high two bits ie. & 0xc0
CURSOR_NONE = 0
CURSOR_BLINKING_BLOCK = 1
CURSOR_UNDERSCORE = 2
CURSOR_BLINKING_BLOCK_UNDERSCORE = 3
CURSOR_INVERTING_BLINKING_BLOCK = 4
MAX_PACKET_DATA_LENGTH = 22
colors = 1
has_underline = False
def __init__(self, device_path, baud):
"""
device_path -- eg. '/dev/ttyUSB0'
baud -- baud rate
"""
super(CFLCDScreen, self).__init__()
self.device_path = device_path
from serial import Serial
self._device = Serial(device_path, baud, timeout=0)
self._unprocessed = ""
@classmethod
def get_crc(cls, buf):
# This seed makes the output of this shift based algorithm match
# the table based algorithm. The center 16 bits of the 32-bit
# "newCRC" are used for the CRC. The MSB of the lower byte is used
# to see what bit was shifted out of the center 16 bit CRC
# accumulator ("carry flag analog");
newCRC = 0x00F32100
for byte in buf:
# Push this byte’s bits through a software
# implementation of a hardware shift & xor.
for bit_count in range(8):
# Shift the CRC accumulator
newCRC >>= 1
# The new MSB of the CRC accumulator comes
# from the LSB of the current data byte.
if ord(byte) & (0x01 << bit_count):
newCRC |= 0x00800000
# If the low bit of the current CRC accumulator was set
# before the shift, then we need to XOR the accumulator
# with the polynomial (center 16 bits of 0x00840800)
if newCRC & 0x00000080:
newCRC ^= 0x00840800
# All the data has been done. Do 16 more bits of 0 data.
for bit_count in range(16):
# Shift the CRC accumulator
newCRC >>= 1
# If the low bit of the current CRC accumulator was set
# before the shift we need to XOR the accumulator with
# 0x00840800.
if newCRC & 0x00000080:
newCRC ^= 0x00840800
# Return the center 16 bits, making this CRC match the one’s
# complement that is sent in the packet.
return ((~newCRC)>>8) & 0xffff
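# Editor's sketch (hypothetical usage): frame a PING packet by hand. The CRC is
# appended little-endian, mirroring _send_packet() below.
# packet = chr(CFLCDScreen.CMD_PING) + chr(0)  # command byte, data length 0
# crc = CFLCDScreen.get_crc(packet)
# packet += chr(crc & 0xff) + chr(crc >> 8)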
def _send_packet(self, command, data):
"""
low-level packet sending.
Following the protocol requires waiting for ack packet between
sending each packet to the device.
"""
buf = chr(command) + chr(len(data)) + data
crc = self.get_crc(buf)
buf = buf + chr(crc & 0xff) + chr(crc >> 8)
self._device.write(buf)
def _read_packet(self):
"""
low-level packet reading.
returns (command/report code, data) or None
This method stores the data it reads and tries to resync when bad data
is received.
"""
# pull in any new data available
self._unprocessed = self._unprocessed + self._device.read()
while True:
try:
command, data, unprocessed = self._parse_data(self._unprocessed)
self._unprocessed = unprocessed
return command, data
except self.MoreDataRequired:
return
except self.InvalidPacket:
# throw out a byte and try to parse again
self._unprocessed = self._unprocessed[1:]
class InvalidPacket(Exception):
pass
class MoreDataRequired(Exception):
pass
@classmethod
def _parse_data(cls, data):
"""
Try to read a packet from the start of data, returning
(command/report code, packet_data, remaining_data)
or raising InvalidPacket or MoreDataRequired
"""
if len(data) < 2:
raise cls.MoreDataRequired
command = ord(data[0])
plen = ord(data[1])
if plen > cls.MAX_PACKET_DATA_LENGTH:
raise cls.InvalidPacket("length value too large")
if len(data) < plen + 4:
raise cls.MoreDataRequired
crc = cls.get_crc(data[:2 + plen])
pcrc = ord(data[2 + plen]) + (ord(data[3 + plen]) << 8 )
if crc != pcrc:
raise cls.InvalidPacket("CRC doesn't match")
return (command, data[2:2 + plen], data[4 + plen:])
class KeyRepeatSimulator(object):
"""
Provide simulated repeat key events when given press and
release events.
If two or more keys are pressed disable repeating until all
keys are released.
"""
def __init__(self, repeat_delay, repeat_next):
"""
repeat_delay -- seconds to wait before starting to repeat keys
repeat_next -- time between each repeated key
"""
self.repeat_delay = repeat_delay
self.repeat_next = repeat_next
self.pressed = {}
self.multiple_pressed = False
def press(self, key):
if self.pressed:
self.multiple_pressed = True
self.pressed[key] = time.time()
def release(self, key):
if key not in self.pressed:
return # ignore extra release events
del self.pressed[key]
if not self.pressed:
self.multiple_pressed = False
def next_event(self):
"""
Return (remaining, key) where remaining is the number of seconds
(float) until the key repeat event should be sent, or None if no
events are pending.
"""
if len(self.pressed) != 1 or self.multiple_pressed:
return
for key in self.pressed:

edx/ecommerce | ecommerce/core/migrations/0007_auto_20151005_1333.py | Python | agpl-3.0 | 780 | 0.002564
# -*- coding: utf-8 -*-
from django.db import migrations
def create_switch(apps, schema_editor):
"""Create the async_order_fulfillment switch if it does not already exist."""
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.get_or_create(name='async_order_fulfillment', defaults={'active': False})
def delete_switch(apps, schema_editor):
"""Delete the async_order_fulfillment switch."""
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.filter(name='async_order_fulfillment').delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0006_add_service_user'),
('waffle', '0001_initial'),
]
operations = [
migrations.RunPython(create_switch, reverse_code=delete_switch),
]

zfrxiaxia/Code-zfr | visualgo数据结构/01_sort.py | Python | gpl-3.0 | 1,847 | 0.045529
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 18 20:24:29 2016
"""
list1 = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]
list2 = [1,1,1,1,1,1,1,1]
list3 = [1,2,3,4,5,6,7,8]
list4 = [2,3,6,7,5,2,2,2]
list5 = [8,7,6,5,4,3,2,1]
# check function: compare each sort against bubble sort on fresh copies
def check(func):
print sort_bubble(list(list1))==func(list(list1))
print sort_bubble(list(list2))==func(list(list2))
print sort_bubble(list(list3))==func(list(list3))
print sort_bubble(list(list4))==func(list(list4))
# bubble sort
def sort_bubble(l):
while True:
swapped = False
for i in range(len(l)-1):
if l[i]>l[i+1]:
l[i],l[i+1] = l[i+1],l[i]
swapped = True
if not swapped:
break
return l
# selection sort
def sort_select(l):
for i in range(len(l)-1):
min_num = l[i]
index_min = i
for j in range(i,len(l)):
if l[j]<min_num:
min_num = l[j]
index_min = j
l[i],l[index_min] = l[index_min],l[i]
return l
# insertion sort
def sort_insert(l):
for i in range(1,len(l)):
temp = l[i]
del l[i]
for j in range(i-1,-1,-1):
if j==0 and l[j] > temp:
l.insert(0,temp)
elif l[j] > temp:
pass
else:
l.insert(j+1,temp)
break
return l
# merge sort
def sort_merge(l):
if len(l) <= 1:
return l
num = int( len(l)/2 )
left = sort_merge(l[:num])
right = sort_merge(l[num:])
return Merge(left, right)
def Merge(left, right):
l, r = 0, 0
result=[]
while l<len(left) and r<len(right):
if left[l] < right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
result += right[r:]
result += left[l:]
return result
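# Editor's sketch: run the reference check for each sort (uncomment to use).
# check(sort_select)
# check(sort_insert)
# check(sort_merge)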
#

timevortexproject/timevortex | features/__init__.py | Python | mit | 23 | 0
"""Features
|
modules"""

destijl/grr | grr/lib/aff4_objects/hardware.py | Python | apache-2.0 | 334 | 0.005988
#!/usr/bin/env python
"""AFF4 objects for managing Chipsec responses."""
from grr.client.components.chipsec_support.actions import chipsec_types
from grr.lib.aff4_objects import collects
class ACPITableDataCollection(collects.RDFValueCollection):
"""A collection of ACPI table data."""
_rdf_type = chipsec_types.ACPITableData

arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HDFS/package/alerts/alert_checkpoint_time.py | Python | apache-2.0 | 10,596 | 0.014062
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import urllib2
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import logging
import traceback
from resource_management.libraries.functions.namenode_ha_utils import get_all_namenode_addresses
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
from resource_management.core.environment import Environment
LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
HDFS_SITE_KEY = '{{hdfs-site}}'
RESULT_STATE_UNKNOWN = 'UNKNOWN'
RESULT_STATE_SKIPPED = 'SKIPPED'
NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
PERCENT_WARNING_KEY = 'checkpoint.time.warning.threshold'
PERCENT_WARNING_DEFAULT = 200
PERCENT_CRITICAL_KEY = 'checkpoint.time.critical.threshold'
PERCENT_CRITICAL_DEFAULT = 200
CHECKPOINT_TX_MULTIPLIER_WARNING_KEY = 'checkpoint.txns.multiplier.warning.threshold'
CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT = 2
CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY = 'checkpoint.txns.multiplier.critical.threshold'
CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT = 4
CHECKPOINT_TX_DEFAULT = 1000000
CHECKPOINT_PERIOD_DEFAULT = 21600
CONNECTION_TIMEOUT_KEY = 'connection.timeout'
CONNECTION_TIMEOUT_DEFAULT = 5.0
KERBEROS_KEYTAB = '{{hdfs-site/dfs.web.authentication.kerberos.keytab}}'
KERBEROS_PRINCIPAL = '{{hdfs-site/dfs.web.authentication.kerberos.principal}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
SMOKEUSER_KEY = "{{cluster-env/smokeuser}}"
EXECUTABLE_SEARCH_PATHS = '{{kerberos-env/executable_search_paths}}'
logger = logging.getLogger('ambari_alerts')
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used
to build the dictionary passed into execute
"""
return (HDFS_SITE_KEY, NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, EXECUTABLE_SEARCH_PATHS,
NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY, KERBEROS_KEYTAB, KERBEROS_PRINCIPAL, SECURITY_ENABLED_KEY, SMOKEUSER_KEY)
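# Editor's sketch (hypothetical values): the alert framework resolves the tokens
# above and calls execute() with the resulting dict, e.g.
# result_code, label = execute(
#     configurations={'{{hdfs-site}}': {...}, '{{cluster-env/smokeuser}}': 'ambari-qa'},
#     parameters={}, host_name='namenode-1.example.com')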
def execute(configurations={}, parameters={}, host_name=None):
"""
Returns a tuple containing the result code and a pre-formatted result label
Keyword arguments:
configurations (dictionary): a mapping of configuration key to value
parameters (dictionary): a mapping of script parameter key to value
host_name (string): the name of this host where the alert is running
"""
if configurations is None:
return (('UNKNOWN', ['There were no configurations supplied to the script.']))
uri = None
scheme = 'http'
http_uri = None
https_uri = None
http_policy = 'HTTP_ONLY'
checkpoint_tx = CHECKPOINT_TX_DEFAULT
checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
# hdfs-site is required
if not HDFS_SITE_KEY in configurations:
return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
if NN_HTTP_POLICY_KEY in configurations:
http_policy = configurations[NN_HTTP_POLICY_KEY]
if NN_CHECKPOINT_TX_KEY in configurations:
checkpoint_tx = configurations[NN_CHECKPOINT_TX_KEY]
if NN_CHECKPOINT_PERIOD_KEY in configurations:
checkpoint_period = configurations[NN_CHECKPOINT_PERIOD_KEY]
if SMOKEUSER_KEY in configurations:
smokeuser = configurations[SMOKEUSER_KEY]
executable_paths = None
if EXECUTABLE_SEARCH_PATHS in configurations:
executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]
security_enabled = False
if SECURITY_ENABLED_KEY in configurations:
security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
kerberos_keytab = None
if KERBEROS_KEYTAB in configurations:
kerberos_keytab = configurations[KERBEROS_KEYTAB]
kerberos_principal = None
if KERBEROS_PRINCIPAL in configurations:
kerberos_principal = configurations[KERBEROS_PRINCIPAL]
kerberos_principal = kerberos_principal.replace('_HOST', host_name)
# parse script arguments
connection_timeout = CONNECTION_TIMEOUT_DEFAULT
if CONNECTION_TIMEOUT_KEY in parameters:
connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
percent_warning = PERCENT_WARNING_DEFAULT
if PERCENT_WARNING_KEY in parameters:
percent_warning = float(parameters[PERCENT_WARNING_KEY])
percent_critical = PERCENT_CRITICAL_DEFAULT
if PERCENT_CRITICAL_KEY in parameters:
percent_critical = float(parameters[PERCENT_CRITICAL_KEY])
checkpoint_txn_multiplier_warning = CHECKPOINT_TX_MULTIPLIER_WARNING_DEFAULT
if CHECKPOINT_TX_MULTIPLIER_WARNING_KEY in parameters:
checkpoint_txn_multiplier_warning = float(parameters[CHECKPOINT_TX_MULTIPLIER_WARNING_KEY])
checkpoint_txn_multiplier_critical = CHECKPOINT_TX_MULTIPLIER_CRITICAL_DEFAULT
if CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY in parameters:
checkpoint_txn_multiplier_critical = float(parameters[CHECKPOINT_TX_MULTIPLIER_CRITICAL_KEY])
kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
# determine the right URI and whether to use SSL
hdfs_site = configurations[HDFS_SITE_KEY]
scheme = "https" if http_policy == "HTTPS_ONLY" else "http"
nn_addresses = get_all_namenode_addresses(hdfs_site)
for nn_address in nn_addresses:
if nn_address.startswith(host_name + ":"):
uri = nn_address
break
if not uri:
return (RESULT_STATE_SKIPPED, ['NameNode on host {0} not found (namenode addresses = {1})'.format(host_name, ', '.join(nn_addresses))])
current_time = int(round(time.time() * 1000))
last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
# start out assuming an OK status
label = None
result_code = "OK"
try:
if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
env = Environment.get_instance()
# curl requires an integer timeout
curl_connection_timeout = int(connection_timeout)
last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
kerberos_principal, last_checkpoint_time_qry,"checkpoint_time_alert", executable_paths, False,
"NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)
last_checkpoint_time_response_json = json.loads(last_checkpoint_time_response)
last_checkpoint_time = int(last_checkpoint_time_response_json["beans"][0]["LastCheckpointTime"])
journal_transaction_info_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
kerberos_principal, journal_transaction_info_qry,"checkpoint_time_alert", executable_paths,
False, "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
kinit_timer_ms = kinit_timer_ms)

dhyams/SALib | SALib/analyze/__main__.py | Python | lgpl-3.0 | 1,927 | 0.011936
from sys import exit
import argparse
import sobol, morris, extended_fast
parser = argparse.ArgumentParser(description='Perform sensitivity analysis on model output')
parser.add_argument('-m', '--method', type=str, choices=['sobol', 'morris', 'fast'], required=True)
parser.add_argument('-p', '--paramfile', type=str, required=True, help='Parameter range file')
parser.add_argument('-Y', '--model-output-file', type=str, required=True, help='Model output file')
parser.add_argument('-c', '--column', type=int, required=False, default=0, help='Column of output to analyze')
parser.add_argument('--delimiter', type=str, required=False, default=' ', help='Column delimiter in model output file')
parser.add_argument('--sobol-max-order', type=int, required=False, default=2, choices=[1, 2], help='Maximum order of sensitivity indices to calculate (Sobol only)')
parser.add_argument('-X', '--morris-model-input', type=str, required=False, default=None, help='Model inputs (required for Method of Morris only)')
parser.add_argument('-r', '--sobol-bootstrap-resamples', type=int, required=False, default=1000, help='Number of bootstrap resamples for Sobol confidence intervals')
args = parser.parse_args()
if args.method == 'sobol':
calc_second_order = (args.sobol_max_order == 2)
sobol.analyze(args.paramfile, args.model_output_file, args.column, calc_second_order, num_resamples=args.sobol_bootstrap_resamples, delim=args.delimiter)
elif args.method == 'morris':
if args.morris_model_input is not None:
morris.analyze(args.paramfile, args.morris_model_input, args.model_output_file, args.column, delim = args.delimiter)
else:
print "Error: model input file is required for Method of Morris. Run with -h flag to see usage."
exit()
elif args.method == 'fast':
extended_fast.analyze(args.paramfile, args.model_output_file, args.column, delim = args.delimiter)
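# Editor's sketch (hypothetical file names): typical invocations of this CLI.
# python -m SALib.analyze -m sobol -p params.txt -Y outputs.txt -c 0
# python -m SALib.analyze -m morris -p params.txt -X inputs.txt -Y outputs.txt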

MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/examples/fileIOPlugin.py | Python | mit | 457 | 0.017505
import pybullet as p
import time
p.connect(p.GUI)
fileIO = p.loadPlugin("fileIOPlugin")
if (fileIO >= 0):
p.executePluginCommand(fileIO, "pickup.zip", [p.AddFileIOAction, p.ZipFileIO])
objs = p.loadSDF("pickup/model.sdf")
dobot = objs[0]
p.changeVisualShape(dobot, -1, rgbaColor=[1, 1, 1, 1])
else:
print("fileIOPlugin is disabled.")
p.setPhysicsEngineParameter(enableFileCaching=False)
while (1):
p.stepSimulation()
time.sleep(1. / 240.)

vegarang/devilry-django | devilry/utils/tests/__init__.py | Python | bsd-3-clause | 81 | 0.024691
from streamable_archive_tests import *
from delivery_collection_tests import *

rosudrag/eve-wspace | evewspace/account/models.py | Python | gpl-3.0 | 4,390 | 0.003189
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm
from django.core.cache import cache
from Map.models import Map, System
from django.db.models.signals import post_save
import pytz
import datetime
import time
# Create your models here.
class PlayTime(models.Model):
"""PlayTime represents a choice of play times for use in several forms."""
fromtime = models.TimeField()
totime = models.TimeField()
class UserProfile(models.Model):
"""UserProfile defines custom fields tied to each User record in the Django auth DB."""
user = models.ForeignKey(User, unique=True)
jabberid = models.EmailField(blank=True, null=True)
defaultmap = models.ForeignKey(Map, related_name = "defaultusers", blank=True, null=True)
playtimes = models.ManyToManyField(PlayTime)
currentsystem = models.ForeignKey(System, related_name="activepilots", blank=True, null=True)
lastactive = models.DateTimeField()
class Meta:
permissions = (('account_admin', 'Administer users and groups'),)
def update_location(self, sys_id, charid, charname, shipname, shiptype):
"""Updates the cached locations dict for this user."""
current_time = time.time()
user_cache_key = 'user_%s_locations' % self.user.pk
user_locations_dict = cache.get(user_cache_key)
time_threshold = current_time - (60 * 15)
location_tuple = (sys_id, charname, shipname, shiptype, current_time)
if user_locations_dict:
user_locations_dict.pop(charid, None)
user_locations_dict[charid] = location_tuple
else:
user_locations_dict = {charid: location_tuple}
# Prune dict to ensure we're not carrying over stale entries
for charid, location in user_locations_dict.items():
if location[4] < time_threshold:
user_locations_dict.pop(charid, None)
cache.set(user_cache_key, user_locations_dict, 60 * 15)
return user_locations_dict
class GroupProfile(models.Model):
"""GroupProfile defines custom fields tied to each Group record."""
group = models.OneToOneField(Group, related_name='profile')
description = models.CharField(max_length=200, blank=True, null=True)
regcode = models.CharField(max_length=64, blank=True, null=True)
visible = models.BooleanField(default=True)
def create_user_profile(sender, instance, created, **kwargs):
"""Handle user creation event and create a new profile to match the new user"""
if created:
UserProfile.objects.create(user=instance, lastactive=datetime.datetime.utcnow().replace(tzinfo=pytz.UTC))
post_save.connect(create_user_profile, sender=User)
def create_group_profile(sender, instance, created, **kwargs):
"""Handle group creation event and create a new group profile."""
if created:
GroupProfile.objects.create(group=instance)
post_save.connect(create_group_profile, sender=Group)
class RegistrationForm(UserCreationForm):
"""Extends the django registration form to add fields."""
username = forms.CharField(max_length=30, label="Username")
email = forms.EmailField(required=False, label="E-Mail Address (Optional)")
password2 = forms.CharField(widget=forms.PasswordInput, label="Confirm Password:")
regcode = forms.CharField(max_length=64, label="Registration Code")

goldhand/django-nupages | tests/test_models.py | Python | bsd-3-clause | 1,041 | 0.024976
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-nupages
------------
Tests for `django-nupages` models module.
"""
import os
import shutil
import unittest
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from nupages import models
from nupages import views
class TestNupages(unittest.TestCase):
def create_page(
self,
title="Test Page",
description="yes, this is only a test",
content="yes, this is only a test",
custom_template="",
site=None):
# create the Site per call instead of once at import time (fixes the
# default-argument-evaluation pitfall in the original)
if site is None:
site = Site.objects.create(domain="127.0.0.1:8000", name="127.0.0.1:8000")
return models.Page.objects.create(
title=title,
description=description,
content=content,
custom_template=custom_template,
created=timezone.now(),
site=site)
def test_page_creation(self):
p = self.create_page()
self.assertTrue(isinstance(p, models.Page))
self.assertEqual(p.__unicode__(), p.title)
self.assertEqual(p.get_absolute_url(), reverse("nupages:detail", kwargs={'slug': p.slug}))

simone-campagna/statcode | lib/python/statcode/project_file.py | Python | apache-2.0 | 5,340 | 0.004307
#!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import os
import fnmatch
import collections
from .stats import FileStats
from .filetype_classifier import FileTypeClassifier
class ProjectFile(object):
def __init__(self, filepath, project_dir, filetype=None):
self.project_dir = project_dir
self.filetype_classifier = project_dir.project.filetype_classifier
self.filepath = filepath
self._filetypes = None
self.qualifiers = None
self.filetype = filetype
self.file_stats = None
def pre_classify(self):
qualifiers, self._filetypes = self.filetype_classifier.classify(self.filepath)
if qualifiers:
self.qualifiers = ";".join(qualifiers) + '-'
if self._filetypes is not None:
if len(self._filetypes) == 0:
self.filetype = FileTypeClassifier.FILETYPE_UNCLASSIFIED
elif len(self._filetypes) == 1:
self.filetype = next(iter(self._filetypes))
#print("PRE", self.filepath, self._filetypes, self.filetype)
def post_classify(self):
# if self.filepath.endswith(".h"):
# print("***", self.filepath, self.filetype, self._filetypes)
if self.filetype is None:
if not self._filetypes:
self.filetype = FileTypeClassifier.FILETYPE_UNCLASSIFIED
else:
self._filetypes = self.filetype_classifier.classify_by_content(self._filetypes, self.filepath)
if len(self._filetypes) == 0:
self.filetype = FileTypeClassifier.FILETYPE_UNCLASSIFIED
elif len(self._filetypes) == 1:
self.filetype = next(iter(self._filetypes))
else:
project_dir = self.project_dir
while project_dir:
for filetype in project_dir.most_common_filetypes():
assert not filetype in FileTypeClassifier.NO_FILETYPE_FILES
if filetype in self._filetypes:
self.filetype = filetype
#print("HERE A: ", self.filepath, self._filetypes, self.filetype, project_dir.dirpath)
break
else:
project_dir = project_dir.parent
continue
break
else:
#self.filetype = next(iter(self._filetypes))
self.filetype = next(iter(self._filetypes))
#print("HERE Z: ", self.filepath, self._filetypes, self.filetype)
# stats
if self.filetype in FileTypeClassifier.NON_EXISTENT_FILES:
self.file_stats = FileStats()
else:
block_size = self.project_dir.project.block_size
num_lines = 0
num_bytes = 0
newline = b'\n'
try:
with open(self.filepath, 'rb') as filehandle:
last_block = None
while True:
block = filehandle.read(block_size)
if not block:
break
last_block = block
num_bytes += len(block)
num_lines += block.count(newline)
# compare a one-byte slice: indexing bytes yields an int in Python 3
if last_block and last_block[-1:] != newline:
num_lines += 1
self.file_stats = FileStats(lines=num_lines, bytes=num_bytes)
except (OSError, IOError) as e:
self.filetype = FileTypeClassifier.FILETYPE_UNREADABLE
self.file_stats = FileStats()
try:
self.file_stats.bytes += os.stat(self.filepath).st_size
except:
pass
#if self.filetype_classifier.filetype_is_binary(self.filetype):
# self.file_stats = FileStats(bytes=os.stat(self.filepath).st_size)
#else:
# try:
# with open(self.filepath, 'r') as filehandle:
# num_lines = 0
# num_bytes = 0
# for line in filehandle:
# num_bytes += len(line)
# num_lines += 1
# self.file_stats = FileStats(lines=num_lines, bytes=num_bytes)
# except UnicodeDecodeError as e:
# self.filetype = FileTypeClassifier.FILETYPE_DATA
# self.file_stats = FileStats(bytes=os.stat(self.filepath).st_size)
#print("POST", self.filepath, self._filetypes, self.filetype)

Clivern/PyLogging | examples/custom_actions.py | Python | mit | 773 | 0.01423
import pylogging
import os
# Logs Dir Absolute Path
logs_path = os.path.dirname(os.path.abspath(__file__)) + '/logs/'
# Create Logger Instance
logger = pylogging.PyLogging(LOG_FILE_PATH = logs_path)
def customAction1(type, msg):
# Custom Action Goes Here
pass
# Add Action
actionIden1 = logger.addAction(customAction1)
def customAction2(type, msg):
# Custom Action Goes Here
pass
# Add Action
actionIden2 = logger.addAction(customAction2)
# To Remove Action1
logger.removeAction(actionIden1)
# Log Info Message
logger.info("Info Message")
# Log Warning Message
logger.warning("Warning Message.")
# Log Error Message
logger.error("Error Message.")
# Log Critical Message
logger.critical("Critical Message.")
# Log Normal Message
logger.log("Normal Log Message.")

NetstationMurator/django-treenav | treenav/forms.py | Python | bsd-3-clause | 2,787 | 0.001794
from django import forms
from django.core.urlresolvers import reverse, NoReverseMatch
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import URLValidator
from treenav.models import MenuItem
from mptt.forms import TreeNodeChoiceField, MPTTAdminForm
class MenuItemFormMixin(object):
def clean_link(self):
link = self.cleaned_data['link'] or ''
# It could be a fully-qualified URL -- try that first b/c reverse()
# chokes on "http://"
if any([link.startswith(s) for s in ('http://', 'https://')]):
URLValidator()(link)
elif link and not any([link.startswith(s) for s in ('^', '/')]):
# Not a regex or site-root-relative absolute path -- see if it's a
# named URL or view
try:
reverse(link)
except NoReverseMatch:
raise forms.ValidationError('Please supply a valid URL, URL '
'name, or regular expression.')
return self.cleaned_data['link']
def clean(self):
super(MenuItemFormMixin, self).clean()
content_type = self.cleaned_data['content_type']
object_id = self.cleaned_data['object_id']
if (content_type and not object_id) or (not content_type and object_id):
raise forms.ValidationError(
"Both 'Content type' and 'Object id' must be specified to use generic relationship"
)
if content_type and object_id:
try:
obj = content_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist, e:
raise forms.ValidationError(str(e))
try:
obj.get_absolute_url()
except AttributeError, e:
raise forms.ValidationError(str(e))
if 'is_enabled' in self.cleaned_data and \
self.cleaned_data['is_enabled'] and \
'link' in self.cleaned_data and \
self.cleaned_data['link'].startswith('^'):
raise forms.ValidationError('Menu items with regular expression '
'URLs must be disabled.')
return self.cleaned_data
class MenuItemForm(MenuItemFormMixin, MPTTAdminForm):
class Meta:
model = MenuItem
exclude = ()
class MenuItemInlineForm(MenuItemFormMixin, forms.ModelForm):
class Meta:
model = MenuItem
exclude = ()
class GenericInlineMenuItemForm(forms.ModelForm):
parent = TreeNodeChoiceField(
queryset=MenuItem.tree.all(),
required=False
)
class Meta:
model = MenuItem
fields = ('parent', 'label', 'slug', 'order', 'is_enabled')

ratschlab/ASP | examples/undocumented/python_modular/kernel_combined_modular.py | Python | gpl-2.0 | 1,900 | 0.036842
from tools.load import LoadMatrix
from numpy import double
lm=LoadMatrix()
traindat = double(lm.load_numbers('../data/fm_train_real.dat'))
testdat = double(lm.load_numbers('../data/fm_test_real.dat'))
traindna = lm.load_dna('../data/fm_train_dna.dat')
testdna = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,traindna,testdna],[traindat,testdat,traindna,testdna]]
def kernel_combined_modular(fm_train_real=traindat,fm_test_real=testdat,fm_train_dna=traindna,fm_test_dna=testdna ):
from shogun.Kernel import CombinedKernel, GaussianKernel, FixedDegreeStringKernel, LocalAlignmentStringKernel
from shogun.Features import RealFeatures, StringCharFeatures, CombinedFeatures, DNA
kernel=CombinedKernel()
feats_train=CombinedFeatures()
feats_test=CombinedFeatures()
subkfeats_train=RealFeatures(fm_train_real)
subkfeats_test=RealFeatures(fm_test_real)
subkernel=GaussianKernel(10, 1.1)
feats_train.append_feature_obj(subkfeats_train)
feats_test.append_feature_obj(subkfeats_test)
kernel.append_kernel(subkernel)
subkfeats_train=StringCharFeatures(fm_train_dna, DNA)
subkfeats_test=StringCharFeatures(fm_test_dna, DNA)
degree=3
subkernel=FixedDegreeStringKernel(10, degree)
feats_train.append_feature_obj(subkfeats_train)
feats_test.append_feature_obj(subkfeats_test)
kernel.append_kernel(subkernel)
subkfeats_train=StringCharFeatures(fm_train_dna, DNA)
subkfeats_test=StringCharFeatures(fm_test_dna, DNA)
subkernel=LocalAlignmentStringKernel(10)
feats_train.append_feature_obj(subkfeats_train)
feats_test.append_feature_obj(subkfeats_test)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Combined')
kernel_combined_modular(*parameter_list[0])

AdamStone/cryptrade | cryptrade/trading.py | Python | unlicense | 30,686 | 0.000293
""" Tools for handling trade data, candle data, and simulating trades. """
from __future__ import division
import os
import csv
import time
from datetime import datetime, timedelta
import itertools
import traceback
from decimal import Decimal, getcontext
getcontext().prec = 8
from utilities import (TRADES, CANDLES, ut_to_dt, dt_to_ut,
build_data_directories, parse_period, pdelta,
get_candle, trades_to_candles, save_candlefile)
from api import BitfinexAPI
class TradeStream(object):
"""
A TradeStream collects live data from exchange API
(currently only Bitfinex API supported). If record_trades is True,
trades will be recorded to a local file.
Note that multiple TradeStreams should not be run simultaneously for the
same market, or duplicate trades will be written to the file.
"""
API_ACCESS = {'bitfinex': BitfinexAPI, 'bitstamp': BitfinexAPI}
def __init__(self, market='bitfinex_BTC_USD',
record_trades=True, quiet=False):
self.exchange, self.base, self.alt = (item.lower()
for item in market.split('_'))
self.symbol = self.base + self.alt
self.api = self.API_ACCESS[self.exchange]()
self.record_trades = record_trades
self.quiet = quiet
try:
with open(TRADES+'{}_{}_{}'.format(self.exchange,
self.base, self.alt), 'rb') as readfile:
reader = csv.reader(readfile, delimiter=',')
self.trades = [
{'timestamp': int(row[0]),
'price': Decimal(row[1]),
'amount': Decimal(row[2])}
for row in reader]
except:
self.trades = []
self.update()
def update(self):
self.new_trades = []
response = self.api.trades({}, self.symbol)
if response:
trades = sorted(response, key=lambda x: int(x['timestamp']))
new_trades = [{'timestamp': int(t['timestamp']),
'price': Decimal(t['price']),
'amount': Decimal(t['amount'])}
for t in trades
if t['timestamp'] > self.last_trade()['timestamp']
and t['exchange'] == self.exchange]
if new_trades:
self.new_trades = new_trades
# print each new trade, and add it to the
# trade file if record_trades==True
for trade in new_trades:
if not self.quiet:
print "{} {} {} {} {} {}".format(
ut_to_dt(trade['timestamp']), self.exchange,
trade['price'], self.alt, trade['amount'],
self.base)
self.trades.append({'timestamp': int(trade['timestamp']),
'price': Decimal(trade['price']),
'amount': Decimal(trade['amount'])})
self.price = self.trades[-1]['price']
# write new trades to tradefile
if self.record_trades:
tradefile = TRADES+'{}_{}_{}'.format(
self.exchange, self.base, self.alt)
if not os.path.exists(TRADES):
build_data_directories()
with open(tradefile, 'a') as writefile:
writer = csv.writer(writefile, delimiter=',')
writer.writerow([trade['timestamp'],
trade['price'], trade['amount']])
return self.new_trades
def run(self, update_every=15):
while True:
time.sleep(update_every)
try:
self.update()
except:
traceback.print_exc()
def last_trade(self):
if self.trades:
return self.trades[-1]
else:
return {'timestamp': 0}
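# Editor's sketch (hypothetical usage): stream live trades without recording.
# stream = TradeStream(market='bitfinex_BTC_USD', record_trades=False)
# stream.run(update_every=15)  # poll the exchange roughly every 15 seconds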
class CandleStream(object):
""" A CandleStream converts trade data from a TradeSource to candle data
for a given period. Multiple candle streams can be run from the same
TradeSource. In that case, all the CandleStreams should be updated before
each new update of the TradeSource.
"""
def __init__(self, tradesource, period, record_candles=True,
start=None, quiet=False):
self.tradesource = tradesource
self.p_value, self.p_unit = parse_period(period)
self.period = period
self.step = pdelta(self.p_value, self.p_unit)
self.exchange, self.base, self.alt = (tradesource.exchange,
tradesource.base,
tradesource.alt)
self.candlefile = CANDLES + '{}_{}_{}_{}{}'.format(
self.exchange, self.base, self.alt, self.p_value, self.p_unit)
self.record_candles = record_candles
self.quiet = quiet
# check for candle directory
if not os.path.exists(CANDLES):
build_data_directories()
# check for candle file
if os.path.exists(self.candlefile):
with open(self.candlefile, 'rb') as readfile:
reader = csv.reader(readfile, delimiter=',')
if start:
self.closed_candles = [[int(candle[0])] + [Decimal(x)
for x in candle[1:]]
for candle in reader
if ut_to_dt(candle[0]) < start]
else:
self.closed_candles = [[int(candle[0])] + [Decimal(x)
for x in candle[1:]]
for candle in reader]
self.active_candle = self.closed_candles.pop()
# if no candle file, check for trades in tradesource
elif self.tradesource.trades:
if not self.quiet:
print 'No candlefile found; generating from tradesource...'
if start:
self.closed_candles = [[int(candle[0])] + [Decimal(x)
for x in candle[1:]]
for candle in trades_to_candles(
self.tradesource.trades, period)
if ut_to_dt(candle[0]) < start]
else:
self.closed_candles = [[int(candle[0])] + [Decimal(x)
for x in candle[1:]]
for candle in trades_to_candles(
self.tradesource.trades, period)]
# assume the last candle is still active
self.active_candle = self.closed_candles.pop()
# if no candles or trades
else:
if not self.quiet:
print ('No candlefile found; no tradefile found; '
'waiting for new trades...')
self.closed_candles = []
self.active_candle = []
self.active_trades = []
self.next_start = None
if self.active_candle: # at least one candle was found
self.next_start = ut_to_dt(self.active_candle[0]) + self.step
# assume last candle is not closed yet (check in update)
self.last_closed_known = False
# get trade data from most recent candle
self.active_trades = [
trade for trade in self.tradesource.trades
if trade['timestamp'] >= self.active_candle[0]]
def update(self):
""" Checks for new trades and updates the candle data. """
new_trades = self.tradesource.new_trades
if new_trades:
self.active_trades += [{'timestamp': int(trade['timestamp']),
'price': Decimal(trade['price']),

opensemanticsearch/open-semantic-search-apps | src/annotate/urls.py | Python | gpl-3.0 | 494 | 0.01417
from django.conf.urls import url
from annotate import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create$', views.create_annotation, name='create'),
url(r'^edit$', views.edit_annotation, name='edit'),
url(r'^json$', views.export_json, name='export_json'),
url(r'^rdf$', views.export_rdf, name='export_rdf'),
url(r'^(?P<pk>\d+)/update$', views.update_annotation, name='update'),
url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
]

bzennn/blog_flask | python/lib/python3.5/site-packages/pilkit/processors/__init__.py | Python | gpl-3.0 | 386 | 0
# flake8: noqa
"""
PILKit image processors.
A processor accepts an image, does some stuff, and returns the result.
Processors can do anything with the image you want, but their responsibilities
should be limited to image manipulations--they should be completely decoupled
from the filesystem.
"""
from .base import *
from .crop import *
from .overlay import *
from .resize import *
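# Editor's sketch (assumes PIL and pilkit's documented processor protocol):
# from PIL import Image
# from pilkit.processors import ResizeToFill
# thumbnail = ResizeToFill(100, 100).process(Image.open('photo.jpg'))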

alexwaters/python-readability-api | examples/read-bookmarks.py | Python | mit | 2,034 | 0.002458
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
read-bookmarks.py
~~~~~~~~~~~~~~~~~
This module is an example of how to harness the Readability API w/ oAuth.
This module expects the following environment variables to be set:
- READABILITY_CONSUMER_KEY
- READABILITY_CONSUMER_SECRET
- READABILITY_ACCESS_TOKEN
- READABILITY_ACCESS_SECRET
Once you have your consumer keys set up, run the following to get your
access tokens::
$ ./login-xauth.py <username> <password>
"""
import sys
from HTMLParser import HTMLParser
from ext import setup_rdd
class MLStripper(HTMLParser):
"""HTMLParser w/ overrides for stripping text out."""
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ' '.join(self.fed)
def strip_tags(html):
"""A super low-tech and debatably irresponsible attempt to turn HTML
into plain text."""
s = MLStripper()
s.feed(html)
data = s.get_data()
for s in ('\n\n\n\n\n', '\n\n\n\n', '\n\n\n', '\n', '\t'):
data = data.replace(s, '')
data = data.replace(' ', '')
return data
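# Editor's sketch (hypothetical usage, Python 2):
# print strip_tags('<p>Hello <b>world</b>!</p>')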
def main():
rdd = setup_rdd()
bookmarks = rdd.get_me().bookmarks(limit=10)
print 'Recent Bookmarks'
print '----------------\n'
for i, mark in enumerate(bookmarks):
print '%01d: %s (%s)' % (i, mark.article.title, mark.article.domain)
try:
selection = raw_input('\nRead Article (0-9)? ')
selection = int(selection)
assert (selection < 10) and (selection >= 0)
except (ValueError, AssertionError):
print >> sys.stderr, '\nEnter a number within 0-9, if you don\'t mind.'
except KeyboardInterrupt:
print >> sys.stderr, '\nWell, fine.'
sys.exit()
article = bookmarks[selection].article
article = rdd.get_article(article.id)
print article.title
print '-' * len(article.title) + '\n'
print strip_tags(article.content)
if __name__ == '__main__':
main()

mdiller/MangoByte | cogs/utils/metastats.py | Python | mit | 738 | 0.004065
def get_hero_winrate(hero):
"""returns hero winrate from list of meta heroes"""
if hero['pro_pick'] == 0: return 0
else: return hero.get('pro_win', 0) / hero.get('pro_pick', 1)
def get_hero_pick_percent(hero, heroes):
return hero.get('pro_pick', 0) / get_total_pro_games(heroes)
def get_hero_ban_percent(hero, heroes):
return hero.get('pro_ban', 0) / get_total_pro_games(heroes)
def get_total_pro_games(heroes):
total = 0
for hero in heroes:
total += hero.get('pro_pick', 0) # sums total games in the list
total = total/10
return total
def get_hero_pickban_percent(hero, heroes):
return (
hero.get('pro_pick', 0) + hero.get('pro_ban', 0)
) / get_total_pro_games(heroes)
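# Editor's sketch (hypothetical data): hero dicts carry pro pick/win/ban counts.
# heroes = [{'pro_pick': 120, 'pro_win': 66, 'pro_ban': 40}]
# get_hero_winrate(heroes[0])  # -> 0.55 (66 wins / 120 picks)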

arju88nair/projectCulminate | venv/lib/python3.5/site-packages/nltk/corpus/reader/reviews.py | Python | apache-2.0 | 12,534 | 0.002074
# Natural Language Toolkit: Product Reviews Corpus Reader
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for reviews corpora (syntax based on Customer Review Corpus).
- Customer Review Corpus information -
Annotated by: Minqing Hu and Bing Liu, 2004.
Department of Computer Science
University of Illinois at Chicago
Contact: Bing Liu, liub@cs.uic.edu
http://www.cs.uic.edu/~liub
Distributed with permission.
The "product_reviews_1" and "product_reviews_2" datasets respectively contain
annotated customer reviews of 5 and 9 products from amazon.com.
Related papers:
- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
Proceedings of the ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining (KDD-04), 2004.
- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews".
Proceedings of Nineteenth National Conference on Artificial Intelligence
(AAAI-2004), 2004.
- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Approach to
Opinion Mining." Proceedings of First ACM International Conference on Web
Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University,
Stanford, California, USA.
Symbols used in the annotated reviews:
[t] : the title of the review: Each [t] tag starts a review.
xxxx[+|-n]: xxxx is a product feature.
[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest.
Note that the strength is quite subjective.
You may want to ignore it and only consider + and -
[-n]: Negative opinion
## : start of each sentence. Each line is a sentence.
[u] : feature not appeared in the sentence.
[p] : feature not appeared in the sentence. Pronoun resolution is needed.
[s] : suggestion or recommendation.
[cc]: comparison with a competing product from a different brand.
[cs]: comparison with a competing product from the same brand.
Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not
provide separation between different reviews. This is due to the fact that
the dataset was specifically designed for aspect/feature-based sentiment
analysis, for which sentence-level annotation is sufficient. For document-
level classification and analysis, this peculiarity should be taken into
consideration.
"""
from __future__ import division
from six import string_types
import re
from nltk.corpus.reader.api import *
from nltk.tokenize import *
TITLE = re.compile(r'^\[t\](.*)$') # [t] Title
FEATURES = re.compile(r'((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]') # find 'feature' in feature[+3]
NOTES = re.compile(r'\[(?!t)(p|u|s|cc|cs)\]') # find 'p' in camera[+2][p]
SENT = re.compile(r'##(.*)$') # find tokenized sentence
@compat.python_2_unicode_compatible
class Review(object):
"""
A Review is the main block of a ReviewsCorpusReader.
"""
def __init__(self, title=None, review_lines=None):
"""
:param title: the title of the review.
:param review_lines: the list of the ReviewLines that belong to the Review.
"""
self.title = title
if review_lines is None:
self.review_lines = []
else:
self.review_lines = review_lines
def add_line(self, review_line):
"""
Add a line (ReviewLine) to the review.
:param review_line: a ReviewLine instance that belongs to the Review.
"""
assert isinstance(review_line, ReviewLine)
self.review_lines.append(review_line)
def features(self):
"""
Return a list of features in the review. Each feature is a tuple made of
the specific item feature and the opinion strength about that feature.
:return: all features of the review as a list of tuples (feat, score).
:rtype: list(tuple)
"""
features = []
for review_line in self.review_lines:
features.extend(review_line.features)
return features
def sents(self):
"""
Return all tokenized sentences in the review.
:return: all sentences of the review as lists of tokens.
:rtype: list(list(str))
"""
return [review_line.sent for review_line in self.review_lines]
def __repr__(self):
return 'Review(title=\"{}\", review_lines={})'.format(self.title, self.review_lines)
@compat.python_2_unicode_compatible
class ReviewLine(object):
"""
A ReviewLine represents a sentence of the review, together with (optional)
annotations of its features and notes about the reviewed item.
"""
def __init__(self, sent, features=None, notes=None):
self.sent = sent
if features is None:
self.features = []
else:
self.features = features
if notes is None:
self.notes = []
else:
self.notes = notes
def __repr__(self):
return ('ReviewLine(features={}, notes={}, sent={})'.format(
self.features, self.notes, self.sent))
class ReviewsCorpusReader(CorpusReader):
"""
Reader for the Customer Review Data dataset by Hu, Liu (2004).
Note: we are not applying any sentence tokenization at the moment, just word
tokenization.
>>> from nltk.corpus import product_reviews_1
>>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt')
>>> review = camera_reviews[0]
>>> review.sents()[0]
['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am',
'extremely', 'satisfied', 'with', 'the', 'purchase', '.']
>>> review.features()
[('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'),
('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'),
('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'),
('option', '+1')]
We can also reach the same information directly from the stream:
>>> product_reviews_1.features('Canon_G3.txt')
[('canon powershot g3', '+3'), ('use', '+2'), ...]
We can compute stats for specific product features:
>>> from __future__ import division
>>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
>>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
>>> # We use float for backward compatibility with division in Python2.7
>>> mean = tot / n_reviews
>>> print(n_reviews, tot, mean)
15 24 1.6
"""
CorpusView = StreamBackedCorpusView
def __init__(self, root, fileids, word_tokenizer=WordPunctTokenizer(),
encoding='utf8'):
"""
:param root: The root directory for the corpus.
:param fileids: a list or regexp specifying the fileids in the corpus.
:param word_tokenizer: a tokenizer for breaking sentences or paragraphs
into words. Default: `WordPunctTokenizer`
:param encoding: the encoding that should be used to read the corpus.
"""
CorpusReader.__init__(self, root, fileids, encoding)
self._word_tokenizer = word_tokenizer
def features(self, fileids=None):
"""
Return a list of features. Each feature is a tuple made of the specific
item feature and the opinion strength about that feature.
:param fileids: a list or regexp specifying the ids of the files whose
features have to be returned.
:return: all features for the item(s) in the given file(s).
:rtype: list(tuple)
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
return concat([self.CorpusView(fileid, self._read_features, encoding=enc)
for (fileid, enc) in self.abspaths(fileids, True)])
    def raw(self, fileids=None):
        """Return all text from the given file(s) as a single raw string."""
|
yiwen-luo/LeetCode
|
Python/read-n-characters-given-read4-ii-call-multiple-times.py
|
Python
|
mit
| 1,921
| 0.006247
|
# Time: O(n)
# Space: O(1)
#
# The API: int read4(char *buf) reads 4 characters at a time from a file.
#
# The return value is the actual number of characters read. For example, it returns 3 if there is only 3 characters left in the file.
#
# By using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.
#
# Note:
# The read function may be called multiple times.
#
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
def read4(buf):
global file_content
i = 0
while i < len(file_content) and i < 4:
buf[i] = file_content[i]
i += 1
if len(file_content) > 4:
file_content = file_content[4:]
else:
file_content = ""
return i
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# def read4(buf):
class Solution(object):
def __init__(self):
self.__buf4 = [''] * 4
self.__i4 = 0
self.__n4 = 0
def read(self, buf, n):
"""
:type buf: Destination buffer (List[str])
:type n: Maximum number of characters to read (int)
:rtype: The number of characters read (int)
"""
i = 0
while i < n:
            if self.__i4 < self.__n4:  # Any characters in buf4.
buf[i] = self.__buf4[self.__i4]
i += 1
self.__i4 += 1
else:
self.__n4 = read4(self.__buf4) # Read more characters.
if self.__n4:
self.__i4 = 0
else: # Buffer has been empty.
break
return i
if __name__ == "__main__":
global file_content
sol = Solution()
buf = ['' for _ in xrange(100)]
file_content = "ab"
print buf[:sol.read(buf, 1)]
print buf[:sol.read(buf, 2)]
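# Expected output for the two calls above (file_content == "ab"):
#   ['a']  -- the first read4 call fetches both characters; read() returns one
#   ['b']  -- the leftover 'b' is served from the internal buffer (__buf4)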
|
sthyme/ZFSchizophrenia
|
BehaviorAnalysis/mergingbehaviordata/lmmanalysisave_septcut4and2ifsame.py
|
Python
|
mit
| 13,435
| 0.026424
|
#!/usr/bin/python
import os,sys,glob,re
import numpy as np
import scipy
from scipy import stats
import datetime
import time
from datetime import timedelta
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
#from matplotlib import colors as c
#from matplotlib import cm
from scipy.stats.kde import gaussian_kde
from numpy import linspace
from scipy.stats import kruskal
#from scipy.stats import nanmean
#from scipy.stats import nanmedian
import pandas as pd
import statsmodels.api as sm
from scipy.stats import mstats
#freqlist = ["numberofbouts_min", "numberofbouts_10min", "dpixnumberofbouts_min", "dpixnumberofbouts_10min", "aveinterboutinterval_min", "aveinterboutinterval_10min", "avedpixinterboutinterval_min", "avedpixinterboutinterval_10min", "dpixsecpermin", "dpixminper10min", "distsecpermin", "distminper10min"]
#loclist = ["interboutcenterfrac", "interboutaverhofrac", "centerfrac", "averhofrac"]
#featlist = ["dpixavebouttime_min", "dpixavebouttime_10min", "aveboutvel_min", "aveboutvel_10min", "avebouttime_min", "avebouttime_10min", "aveboutspeed_min", "aveboutspeed_10min", "aveboutdist_min", "aveboutdist_10min", "aveboutdisp_min", "aveboutdisp_10min", "aveboutcumdpix_min", "aveboutcumdpix_10min"]
nonstimcombos = {"Frequency of movement": ["numberofbouts_min", "numberofbouts_10min", "dpixnumberofbouts_min", "dpixnumberofbouts_10min", "aveinterboutinterval_min", "aveinterboutinterval_10min", "avedpixinterboutinterval_min", "avedpixinterboutinterval_10min", "dpixsecper_min", "dpixminper_10min", "distsecper_min", "distminper_10min"], "Location in well": ["interboutcenterfrac_min", "interboutaverhofrac_min", "centerfrac_min", "averhofrac_min","interboutcenterfrac_10min", "interboutaverhofrac_10min", "centerfrac_10min", "averhofrac_10min"], "Features of movement": ["dpixavebouttime_min", "dpixavebouttime_10min", "aveboutvel_min", "aveboutvel_10min", "avebouttime_min", "avebouttime_10min", "aveboutspeed_min", "aveboutspeed_10min", "aveboutdist_min", "aveboutdist_10min", "aveboutdisp_min", "aveboutdisp_10min", "aveboutcumdpix_min", "aveboutcumdpix_10min"]}
typecombos = [["Night tap habituation", "Day tap habituation 1", "Day tap habituation 2", "Day tap habituation 3"], ["Day light flash", "Night light flash"],["Night early prepulse tap", "Day early prepulse tap"], ["Night all prepulse tap", "Day all prepulse tap"], ["Day all strong tap", "Night all strong tap"], ["Day early strong tap","Night early strong tap"],["Night early weak tap", "Day early weak tap"], ["Day all weak tap", "Night all weak tap"], ["Dark flash block 3 start","Dark flash block 3 end","Dark flash block 4 start","Dark flash block 4 end","Dark flash block 1 start","Dark flash block 1 end","Dark flash block 2 start","Dark flash block 2 end"]]
stimcombos = {
#"Day light flash and weak tap": ["106106"],
#"Night light flash and weak tap": ["night1061
|
06"],
"Night tap habituation": ["nighttaphab102", "nighttaphab1"],
"Day tap habituation 1": ["adaytaphab102", "adaytaphab1"],
"Day tap habituation 3": ["cdaytaphab102", "cdaytaphab1"],
"Day tap habituation 2": ["bdaytaphab102", "bdaytaphab1"],
"Day light flash": ["lightflash104"],
#"Day light flash": ["lightflash104", "lightflash0"],
"Night light flash": ["nightlightflash104"],
#"Night light flash": ["nightlightflash104", "nightlightflash0"],
"Night early prepulse tap": ["shortnightprepulseinhibition100b"],
#"Night early prepulse tap": ["shortnightprepulseinhibition100b", "shortnightprepulseinhibition100c"],
"Night all prepulse tap": ["nightprepulseinhibition100b"],
#"Night all prepulse tap": ["nightprepulseinhibition100b", "nightprepulseinhibition100c"],
"Day early prepulse tap": ["shortdayprepulseinhibition100b"],
#"Day early prepulse tap": ["shortdayprepulseinhibition100b", "shortdayprepulseinhibition100c"],
"Day all prepulse tap": ["dayprepulseinhibition100b"],
#"Day all prepulse tap": ["dayprepulseinhibition100b", "dayprepulseinhibition100c"],
"Day all weak tap": ["dayprepulseinhibition100a", "dayprepulseinhibition101"],
"Day early weak tap": ["shortdayprepulseinhibition100a", "shortdayprepulseinhibition101"],
"Night all weak tap": ["nightprepulseinhibition100a", "nightprepulseinhibition101"],
"Night early weak tap": ["shortnightprepulseinhibition100a", "shortnightprepulseinhibition101"],
"Day early strong tap": ["adaytappre102", "shortdayprepulseinhibition102"],
#"Day early strong tap": ["adaytappre102", "adaytappre1", "shortdayprepulseinhibition102"],
"Day all strong tap": ["dayprepulseinhibition102", "adaytappostbdaytappre102","bdaytappostcdaytappre102", "cdaytappost102"],
#"Day all strong tap": ["dayprepulseinhibition102", "adaytappostbdaytappre102","bdaytappostcdaytappre102", "bdaytappostcdaytappre1", "cdaytappost1", "cdaytappost102","adaytappostbdaytappre1"],
"Night early strong tap": ["nighttappre102"],
#"Night early strong tap": ["nighttappre1", "nighttappre102"],
"Night all strong tap": ["nightprepulseinhibition102","nighttappost102"],
#"Night all strong tap": ["nightprepulseinhibition102","nighttappost102", "nighttappost1"],
#"Dark flash all blocks": ["darkflash103", "darkflash0"],
"Dark flash block 3 start": ["cdarkflash103"],
"Dark flash block 3 end": ["c2darkflash103"],
"Dark flash block 1 start": ["adarkflash103"],
"Dark flash block 1 end": ["a2darkflash103"],
"Dark flash block 2 start": ["bdarkflash103"],
"Dark flash block 2 end": ["b2darkflash103"],
"Dark flash block 4 start": ["ddarkflash103"],
"Dark flash block 4 end": ["d2darkflash103"]}
# "Dark flash block 3 start": ["cdarkflash103", "cdarkflash0"],
# "Dark flash block 3 end": ["c2darkflash103", "c2darkflash0"],
# "Dark flash block 1 start": ["adarkflash103", "adarkflash0"],
# "Dark flash block 1 end": ["a2darkflash103", "a2darkflash0"],
# "Dark flash block 2 start": ["bdarkflash103", "bdarkflash0"],
# "Dark flash block 2 end": ["b2darkflash103", "b2darkflash0"],
# "Dark flash block 4 start": ["ddarkflash103", "ddarkflash0"],
# "Dark flash block 4 end": ["d2darkflash103", "d2darkflash0"]}
#direction = {
# "aveboutspeed": 1
# "aveboutspeed": 1
# ones that are opposite of expected
# fullboutdatamaxloc (max peak location (larger is less strong of response))
# latency (longer is less good), similar to max peak
# aveinterboutinterval
# rho or centerfrac, not sure which orientation would want
# make wall-hugging positive
# lower centerfrac means more positive, which is how it is right now I think, yes, so if I default everything to switching signs, then averhofrac is the odd one out and should be skipped
# for most, larger should mean - and should mean mutant is stronger response or more movement
# need to make most into opposite
# standard
# cumdpix, displacement, distance, speed, velocity, secpermin, numberofbouts, frequency of response, polygonarea
# unsure - fullboutdata as done with linear model, and also the dark flash ones done with linear model
#}
direction_swaps = ["rhofrac", "latency", "interboutinterval", "fullboutdatamaxloc"]
for file in glob.glob("*linearmodel*"): # THIS IS WHAT THE PRINT OUTPUT MUST POINT TO, CAN HAVE SOMETHING AT END, BUT MUST START THIS WAY
if "finalsorted" in file:
continue
dir = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
ffile = open('finalsortedupdatedCP4or2_' + file + "_" + dir, 'w')
ofile = open(file, 'r')
lines = ofile.readlines()
pdict = {}
for line in lines:
# anova data
if line.startswith("anova:"):
pval = line.split(":")[3].strip().split()[3].strip()
#anova: ribgraph_mean_ribbon_latencyresponse_dpix_nighttappost102.png : Mean of array wt, mut, H-stat, P-value: 25.8557471264 21.4177419355 2.63243902441 0.104700765405
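            # line.split(":")[3] is the numeric tail "25.85... 21.41... 2.63... 0.104...",
            # so .split()[3] picks out the P-value and [0] - [1] is (wt mean - mut mean)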
meanwtminmut = float(line.split(":")[3].strip().split()[0]) - float(line.split(":")[3].strip().split()[1])
name = line.split(":")[1].strip()
pdict[name] = [pval, meanwtminmut]
# ffile.write(str(pval))
# ffile.write(', ')
# ffile.write(str(meanwtminmut))
# ffile.write(', ')
# ffile.write(name.strip())
# ffile.write('\n')
# linear mixed model data - this formatting could change if I change the linear model I'm using
else:
list = []
for line in range(0, len(lines)):
|
aferr/LatticeMemCtl
|
src/mem/ruby/network/garnet/fixed-pipeline/GarnetRouter_d.py
|
Python
|
bsd-3-clause
| 2,044
| 0.001468
|
# Copyright (c) 2008 Princeton University
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from BasicRouter import BasicRouter
class GarnetRouter_d(BasicRouter):
type = 'GarnetRouter_d'
cxx_class = 'Router_d'
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.Int(Parent.number_of_virtual_networks,
"number of virtual networks")
|
culturagovbr/sistema-nacional-cultura
|
adesao/migrations/0019_auto_20181005_1645.py
|
Python
|
agpl-3.0
| 8,604
| 0.005811
|
# Generated by Django 2.0.8 on 2018-10-05 19:45
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def cria_sistema_cultura(apps, schema_editor):
erros = []
SistemaCultura = apps.get_model('adesao', 'SistemaCultura')
Municipio = apps.get_model('adesao', 'Municipio')
Cidade = apps.get_model('adesao', 'Cidade')
EnteFederado = apps.get_model('adesao', 'EnteFederado')
Secretario = apps.get_model('adesao', 'Secretario')
Funcionario = apps.get_model('adesao', 'Funcionario')
Gestor = apps.get_model('adesao', 'Gestor')
Sede = apps.get_model('adesao', 'Sede')
Diligencia = apps.get_model('gestao', 'Diligencia')
DiligenciaSimples = apps.get_model('gestao', 'DiligenciaSimples')
Componente = apps.get_model('planotrabalho', 'Componente')
for municipio in Municipio.objects.all():
sistema_cultura = SistemaCultura()
sistema_cultura.gestor = Gestor.objects.create(
cpf=municipio.cpf_prefeito,
rg=municipio.rg_prefeito,
orgao_expeditor_rg=municipio.orgao_expeditor_rg,
estado_expeditor=municipio.estado_expeditor,
nome=municipio.nome_prefeito,
            telefone_um=municipio.telefone_um,
            telefone_dois=municipio.telefone_dois,
telefone_tres=municipio.telefone_tres,
email_institucional=municipio.email_institucional_prefeito,
tipo_funcionario=3,
termo_posse=municipio.termo_posse_prefeito,
rg_copia=municipio.rg_copia_prefeito,
cpf_copia=municipio.cpf_copia_prefeito
)
sistema_cultura.sede = Sede.objects.create(
localizacao=municipio.localizacao,
cnpj=municipio.cnpj_prefeitura,
endereco=municipio.endereco,
complemento=municipio.complemento,
cep=municipio.cep,
bairro=municipio.bairro,
telefone_um=municipio.telefone_um,
telefone_dois=municipio.telefone_dois,
telefone_tres=municipio.telefone_tres,
endereco_eletronico=municipio.endereco_eletronico
)
if municipio.cidade is None:
try:
sistema_cultura.ente_federado = EnteFederado.objects.get(cod_ibge=municipio.estado.codigo_ibge)
except EnteFederado.DoesNotExist:
ente = EnteFederado.objects.filter(nome__icontains=municipio.estado.nome_uf)
if not ente or len(ente) > 1:
print(f"Erro ao procurar UF {municipio.estado.nome_uf} - {municipio.estado.codigo_ibge}\n")
erros.append(municipio.estado.codigo_ibge)
                    continue  # no unambiguous match found; skip this municipio
sistema_cultura.ente_federado = ente[0]
else:
try:
cidade = Cidade.objects.get(nome_municipio=municipio.cidade.nome_municipio, uf=municipio.estado)
sistema_cultura.ente_federado = EnteFederado.objects.get(cod_ibge=cidade.codigo_ibge)
except EnteFederado.DoesNotExist:
ente = EnteFederado.objects.filter(cod_ibge__startswith=cidade.codigo_ibge)
if not ente or len(ente) > 1:
print(f"Erro ao procurar Municipio {municipio.cidade.nome_municipio} - {municipio.cidade.codigo_ibge}\n")
erros.append(municipio.estado.codigo_ibge)
                    continue  # no unambiguous match found; skip this municipio
sistema_cultura.ente_federado = ente[0]
componentes_antigos = ('criacao_sistema', 'orgao_gestor', 'conselho_cultural', 'plano_cultura')
componente_type = ('36', '37', '38', '40')
componentes_novos = ('legislacao', 'orgao_gestor', 'conselho', 'plano')
sistema_cultura.numero_processo = municipio.numero_processo
try:
sistema_cultura.cadastrador = municipio.usuario
sistema_cultura.estado_processo = municipio.usuario.estado_processo
sistema_cultura.data_publicacao_acordo = municipio.usuario.data_publicacao_acordo
sistema_cultura.link_publicacao_acordo = municipio.usuario.link_publicacao_acordo
sistema_cultura.processo_sei = municipio.usuario.processo_sei
if municipio.usuario.plano_trabalho:
diligencia = Diligencia.objects.filter(
componente_id=municipio.usuario.plano_trabalho.id,
componente_type_id=35).order_by('-data_criacao').first()
if diligencia:
sistema_cultura.diligencia = DiligenciaSimples.objects.create(
texto_diligencia=diligencia.texto_diligencia,
classificacao_arquivo=diligencia.classificacao_arquivo,
usuario=diligencia.usuario)
sistema_cultura.diligencia.save()
for nome_componente_antigo, nome_componente_novo, tipo_componente in zip(componentes_antigos, componentes_novos, componente_type):
if municipio.usuario.plano_trabalho:
componente_antigo = getattr(municipio.usuario.plano_trabalho, nome_componente_antigo)
if componente_antigo:
setattr(sistema_cultura, nome_componente_novo, Componente.objects.create())
componente_novo = getattr(sistema_cultura, nome_componente_novo)
componente_novo.tipo = componentes_novos.index(nome_componente_novo)
componente_novo.arquivo = componente_antigo.arquivo
componente_novo.situacao = componente_antigo.situacao.id
componente_novo.data_envio = componente_antigo.data_envio
componente_novo.data_publicacao = componente_antigo.data_publicacao
diligencia = Diligencia.objects.filter(
componente_id=componente_antigo.id,
componente_type_id=tipo_componente).order_by('-data_criacao').first()
if diligencia:
componente_novo.diligencia = DiligenciaSimples.objects.create(
texto_diligencia=diligencia.texto_diligencia,
classificacao_arquivo=diligencia.classificacao_arquivo,
usuario=diligencia.usuario)
componente_novo.save()
secretario = municipio.usuario.secretario
if secretario:
sistema_cultura.secretario = Funcionario.objects.create(cpf=secretario.cpf_secretario,
rg=secretario.rg_secretario, orgao_expeditor_rg=secretario.orgao_expeditor_rg,
estado_expeditor=secretario.estado_expeditor, nome=secretario.nome_secretario,
cargo=secretario.cargo_secretario, instituicao=secretario.instituicao_secretario,
telefone_um=secretario.telefone_um, telefone_dois=secretario.telefone_dois,
telefone_tres=secretario.telefone_tres,
email_institucional=secretario.email_institucional_secretario,
tipo_funcionario=0)
responsavel = municipio.usuario.responsavel
if responsavel:
sistema_cultura.responsavel = Funcionario.objects.create(cpf=responsavel.cpf_responsavel,
rg=responsavel.rg_responsavel, orgao_expeditor_rg=responsavel.orgao_expeditor_rg,
estado_expeditor=responsavel.estado_expeditor, nome=responsavel.nome_responsavel,
cargo=responsavel.cargo_responsavel, instituicao=responsavel.instituicao_responsavel,
telefone_um=responsavel.telefone_um, telefone_dois=responsavel.telefone_dois,
telefone_tres=responsavel.telefone_tres,
email_institucional=responsavel.email_institucional_responsavel,
tipo_funcionario=1)
except ObjectDoesNotExist:
s
|
terhorstd/nest-simulator
|
pynest/examples/hh_psc_alpha.py
|
Python
|
gpl-2.0
| 2,263
| 0
|
# -*- coding: utf-8 -*-
#
# hh_psc_alpha.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Example using hh_psc_alpha
-------------------------------
This example produces a rate-response (FI) curve of the Hodgkin-Huxley
neuron in response to a range of different current (DC) stimulations.
The result is plotted using matplotlib.
Since a DC input affects only the neuron's channel dynamics, this routine
does not yet check correctness of synaptic response.
References
~~~~~~~~~~~
See Also
~~~~~~~~~~
:Authors:
KEYWORDS:
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
nest.hl_api.set_verbosity('M_WARNING')
nest.ResetKernel()
simtime = 1000
# Amplitude range, in pA
dcfrom = 0
dcstep = 20
dcto = 2000
h = 0.1  # simulation step size in ms
neuron = nest.Create('hh_psc_alpha')
sd = nest.Create('spike_detector')
nest.SetStatus(sd, {'to_memory': False})
nest.Connect(neuron, sd, syn_spec={'weight': 1.0, 'delay': h})
# Simulation loop
n_data = int(dcto / float(dcstep))
amplitudes = np.zeros(n_data)
event_freqs = np.zeros(n_data)
for i, amp in enumerate(range(dcfrom, dcto, dcstep)):
nest.SetStatus(neuron, {'I_e': float(amp)})
print("Simulating with current I={} pA".format(amp))
    nest.Simulate(1000)  # one second warm-up time for equilibrium state
nest.SetStatus(sd, {'n_events': 0}) # then reset spike counts
nest.Simulate(simtime) # another simulation call to record firing rate
n_events = nest.GetStatus(sd, keys={'n_events'})[0][0]
amplitudes[i] = amp
event_freqs[i] = n_events / (simtime / 1000.)
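# n_events spikes were counted over simtime (in ms), so event_freqs is in spikes/s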
plt.plot(amplitudes, event_freqs)
plt.show()
|
JackDanger/sentry
|
src/sentry/south_migrations/0212_auto__add_organizationoption__add_unique_organizationoption_organizati.py
|
Python
|
bsd-3-clause
| 40,363
| 0.008052
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'OrganizationOption'
db.create_table('sentry_organizationoptions', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('organization', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Organization'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=64)),
('value', self.gf('sentry.db.models.fields.pickle.UnicodePickledObjectField')()),
))
db.send_create_signal('sentry', ['OrganizationOption'])
# Adding unique constraint on 'OrganizationOption', fields ['organization', 'key']
db.create_unique('sentry_organizationoptions', ['organization_id', 'key'])
def backwards(self, orm):
# Removing unique constraint on 'OrganizationOption', fields ['organization', 'key']
db.delete_unique('sentry_organizationoptions', ['organization_id', 'key'])
# Deleting model 'OrganizationOption'
db.delete_table('sentry_organizationoptions')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [],
|
robertapengelly/ppa-helper
|
ppa_helper/compat.py
|
Python
|
gpl-3.0
| 3,031
| 0.002639
|
#------------------------------------------------------------------------------------------
#
# Copyright 2017 Robert Pengelly.
#
# This file is part of ppa-helper.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------------------
# coding: utf-8
from __future__ import unicode_literals
import collections
import os
import shutil
import subprocess  # needed by the compat_get_terminal_size fallback below
import sys
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
def compat_setenv(key, value, env=os.environ):
env[key] = value
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# Python < 2.6.5 require kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
    compat_kwargs = lambda kwargs: kwargs
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
    def compat_get_terminal_size(fallback=(80, 24)):
columns = compat_getenv('COLUMNS')
if columns:
columns = int(columns)
else:
columns = None
lines = compat_getenv('LINES')
if lines:
lines = int(lines)
else:
lines = None
if columns is None or lines is None or columns <= 0 or lines <= 0:
try:
sp = subprocess.Popen(
['stty', 'size'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = sp.communicate()
_lines, _columns = map(int, out.split())
except Exception:
_columns, _lines = _terminal_size(*fallback)
if columns is None or columns <= 0:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
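# Minimal usage sketch -- identical on either branch:
#   columns, lines = compat_get_terminal_size()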
|
i-rabot/tractogithub
|
tracformatter/trac/tests/functional/compat.py
|
Python
|
bsd-3-clause
| 650
| 0.003077
|
#!/usr/bin/python
import os
import shutil
from trac.util.compat import close_fds
# On Windows, shutil.rmtree doesn't remove
|
files with the read-only
# attribute set, so this function explicitly removes it on every error
# before retrying. Even on Linux, shutil.rmtree chokes on read-only
# directories, so we use this version in all cases.
# Fix from http://bitten.edgewall.org/changeset/521
def rmtree(root):
"""Catch shutil.rmtree failures on Windows when files are read-only."""
    def _handle_error(fn, path, excinfo):
os.chmod(path, 0666)
fn(path)
return shutil.rmtree(root, onerror=_handle_error)
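# Illustrative usage:
#   rmtree('/tmp/scratch-dir')  # also clears read-only files on Windows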
|
rectory-school/rectory-apps
|
courseevaluations/migrations/0005_auto_20151208_1014.py
|
Python
|
mit
| 877
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('academics', '0016_student_auth_key'),
('courseevaluations', '0004_auto_20151208_1004'),
]
operations = [
migrations.RemoveField(
model_name='evaluable',
name='students',
),
migrations.RemoveField(
model_name='freeformquestionanswer',
name='question',
),
migrations.RemoveField(
model_name='multiplechoicequestionanswer',
name='student',
),
migrations.AddField(
model_name='evaluable',
name='student',
            field=models.ForeignKey(to='academics.Student', default=None),
preserve_default=False,
),
]
|
mhoffma/micropython
|
tests/thread/thread_qstr1.py
|
Python
|
mit
| 825
| 0.004848
|
# test concurrent interning of strings
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# function to check the interned string
def check(s, val):
assert type(s) == str
assert int(s) == val
# main thread function
def th(base, n):
for i in range(n):
# this will intern the string and check it
exec("check('%u', %u)" % (base + i, base + i))
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
n_qstr_per_thread = 100  # make 1000 for a more stressful test (uses more heap)
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (i * n_qstr_per_thread, n_qstr_per_thread))
# busy wait for threads to finish
while n_finished < n_thread:
pass
print('pass')
|
mice-software/maus
|
src/common_py/calibration/get_kl_calib.py
|
Python
|
gpl-3.0
| 3,304
| 0.003329
|
# This file is part of MAUS: http://micewww.pp.rl.ac.uk/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
#
"""
Get KL calibrations from DB
"""
import cdb
import json
from Configuration import Configuration
from cdb._exceptions import CdbPermanentError
class GetCalib:
"""
    Client class to fetch KL gain calibrations from the configuration
    database (CDB).
"""
def __init__(self):
"""
        Set up the CDB calibration client from the MAUS configuration.
"""
self._current_cali = {}
self.reset()
cfg = Configuration()
cfgdoc = cfg.getConfigJSON()
cfgdoc_json = json.loads(cfgdoc)
cdb_url = cfgdoc_json['cdb_download_url'] + 'calibration?wsdl'
self.cdb_server = cdb.Calibration()
self.cdb_server.set_url(cdb_url)
#print 'Server: ', self.cdb_server.get_name(), \
# self.cdb_server.get_version()
try:
cdb.Calibration().get_status()
except CdbPermanentError:
raise CdbPermanentError("CDB error")
def get_calib(self, devname, ctype, fromdate):
"""
        Fetch the KL calibration for the given device, type and date.
"""
if devname != "" and ctype != "":
if devname != "KL" or ctype != "gain":
raise Exception('get_kl_calib failed. \
Invalid detector/calib type.')
# check whether we are asked for the current calibration
# or calibration for an older date
if fromdate == "" or fromdate == "current":
#print 'getting current calib', devname, ctype
try:
self._current_cali = \
self.cdb_server.get_current_calibration(devname, ctype)
except CdbPermanentError:
self._current_cali = "cdb_permanent_error"
else:
#print 'getting calib for date', fromdate
try:
self._current_cali = \
self.cdb_server.get_calibration_for_date(devname,
fromdate,
ctype)
except CdbPermanentError:
self._current_cali = "cdb_permanent_error"
#print self._current_cali
else:
raise Exception('get_kl_calib failed. No device/calibration type.')
return self._current_cali
def reset(self):
"""
Reinitialize calibration
"""
self._current_cali = {}
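# Minimal usage sketch (assumes a standard MAUS configuration and a reachable CDB):
#   gains = GetCalib().get_calib("KL", "gain", "current")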
|
aericson/Djax
|
pax/library.py
|
Python
|
bsd-3-clause
| 3,541
| 0.017509
|
"""
Client for the library API.
"""
class LibraryClient(object):
"""
Library API client.
"""
def __init__(self,axilent_connection):
self.content_resource = axilent_connection.resource_client('axilent.library','content')
self.api = axilent_connection.http_client('axilent.library')
def create_content(self,content_type,project,search_index=True,**field_data):
"""
Creates the content. Returns the new content item key in the format:
<content-type>:<content-key>
"""
response = self.content_resource.post(data={'content_type':content_type,
'project':project,
'search_index':search_index,
'content':field_data})
return response['created_content']
def update_content(self,content_type,project,content_key,search_index=True,reset_workflow=True,**field_data):
"""
Updates existing content.
"""
response = self.content_resource.put(data={'content_type':content_type,
'project':project,
'key':content_key,
'search_index':search_index,
'reset_workflow':reset_workflow,
'content':field_data})
return response['updated_content']
def ping(self,project,content_type):
"""
        Tests connection with Axilent.
"""
return self.api.ping(project=project,content_type=content_type)
def index_content(self,project,content_type,content_key):
"""
Forces re-indexing of the specified content item.
"""
response = self.api.indexcontent(content_key=content_key,
                                         project=project,
content_type=content_type)
return response['indexed']
def tag_content(self,project,content_type,content_key,tag,search_index=True):
"""
Tags the specified content item.
"""
response = self.api.tagcontent(project=project,
content_type=content_type,
content_key=content_key,
tag=tag,
search_index=search_index)
return response['tagged_content']
def detag_content(self,project,content_type,content_key,tag,search_index=True):
"""
De-tags the specified content item.
"""
response = self.api.detagcontent(project=project,
content_type=content_type,
content_key=content_key,
tag=tag,
search_index=search_index)
return response['removed_tag']
def archive_content(self,project,content_type,content_key):
"""
Archives the content on Axilent.
"""
response = self.content_resource.delete(params={'content_type':content_type,
'project':project,
'key':content_key})
return response['archived']
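# Illustrative usage (the connection object and field values are assumptions):
#   client = LibraryClient(axilent_connection)
#   key = client.create_content('Article', 'my-project', title='Hello world')
#   client.tag_content('my-project', 'Article', key, 'news')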
|
texnofobix/pyhwtherm
|
example.py
|
Python
|
mit
| 739
| 0.014885
|
import pyhwtherm
mytest = pyhwtherm.PyHWTherm(
username="someuser@example.com",
password="mysecretpassword",
deviceid=123456
)
print "login successful: ",mytest.login()
print "Get thermostat data:",
|
mytest.updateStatus()
beforeChange = mytest.status
print "Status: ", beforeChange
mytest.tempHold("11:00",cool=78,heat=68)
mytest.submit()
print "Get thermostat data:", mytest.updateStatus()
afterChange = mytest.status
print "heat >>",beforeChange['latestData']['uiData']['HeatSetpoint'],"->",afterChange['latestData']['uiData']['HeatSetpoint']
print "cool >>",beforeChange['latestData']['uiData']['CoolSetpoint'],"->",afterChange['latestData']['uiData']['CoolSetpoint']
print "Logout
|
", mytest.logout()
|
54Pany/gum
|
data/libdbs.py
|
Python
|
gpl-3.0
| 1,870
| 0.002674
|
#!/usr/bin/env python
# encoding: utf-8
import sqlite3
from sys import version_info
if version_info >= (3, 0, 0):
def listkey(dicts):
return list(dicts.keys())[0]
else:
def listkey(dicts):
return dicts.keys()[0]
class sqlitei:
'''Encapsulation sql.'''
def __init__(self, path):
self.db = sqlite3.connect(path)
# self.db.text_factory = str
self.cs = self.db.cursor()
def commit(self):
self.db.commit()
    def select(self, table, column, dump=None):
'''Select
table str, column list, dump dict.'''
columns = ','.join(column)
sql = 'select ' + columns + ' from ' + table
dumps = []
if dump:
dumpname = listkey(dump)
sql += ' where ' + dumpname + '=?'
dumps.append(dump[dumpname])
return self.cs.execute(sql, dumps)
def update(self, table, column, dump):
'''Update
        table str, column dict, dump dict.'''
columns = []
columnx = ''
for c in column:
columnx += c + '=?,'
columns.append(column[c])
dumpname = listkey(dump)
sql = 'update ' + table + ' set '+ columnx[:-1] + ' where ' + dumpname + '=?'
columns.append(dump[dumpname])
return self.cs.execute(sql, columns)
def insert(self, table, column, dump):
'''Insert
table str, column list, dump list'''
dumps = ('?,'*len(dump))[:-1]
columns = ','.join(column)
sql = 'insert into ' + table + ' (' + columns + ') values (' +dumps + ')'
return self.cs.execute(sql, dump)
def delete(self, table, dump):
'''Delete
table str, dump dict'''
dumpname = listkey(dump)
sql = 'delete from ' + table + ' where ' + dumpname + '=?'
return self.cs.execute(sql, [dump[dumpname]])
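# Illustrative usage:
#   db = sqlitei('example.db')
#   db.insert('users', ['id', 'name'], [1, 'alice'])
#   rows = db.select('users', ['id', 'name'], {'id': 1}).fetchall()
#   db.commit()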
|
postlund/home-assistant
|
homeassistant/components/insteon/sensor.py
|
Python
|
apache-2.0
| 876
| 0.001142
|
"""Support for INSTEON dimmers via PowerLinc Modem."""
import logging
from homeassistant.helpers.entity import Entity
from .insteon_entity import InsteonEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the INSTEON device class for the hass platform."""
insteon_modem = hass.data["insteon"].get("modem")
address = discovery_info["address"]
    device = insteon_modem.devices[address]
state_key = discovery_info["state_key"]
_LOGGER.debug(
"Adding device %s entity %s to Sensor platform",
device.address.hex,
device.states[state_key].name,
)
new_entity = InsteonSensorDevice(device, state_key)
async_add_entities([new_entity])
class InsteonSensorDevice(InsteonEntity, Entity):
"""A Class for an Insteon device."""
|
panchr/wire
|
wire/tests/sqlstring_test.py
|
Python
|
gpl-3.0
| 1,665
| 0.027628
|
import unittest
import wire
class TestSQLString(unittest.TestCase):
def setUp(self):
'''Sets up the test case'''
self.sql = wire.SQLString
def test_pragma(self):
'''Tests the PRAGMA SQL generation'''
self.assertEqual(self.sql.pragma("INTEGRITY_CHECK(10)"), "PRAGMA INTEGRITY_CHECK(10)")
self.assertEqual(self.sql.checkIntegrity(5), "PRAGMA INTEGRITY_CHECK(5)")
def test_createTable(self):
'''Tests the CREATE TABLE SQL generation'''
table_outputs = ["CREATE TABLE test (id INT NOT NULL,username VARCHAR(255) DEFAULT 'default_user')",
"CREATE TABLE test (username VARCHAR(255) DEFAULT 'default_user',id INT NOT NULL)"]
temp_table_outputs = ["CREATE TEMPORARY TABLE test_temp (value REAL DEFAULT 0.0,time TIMESTAMP DEFAULT CURRENT_TIMESTAMP)",
"CREATE TEMPORARY TABLE test_temp (time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,value REAL DEFAULT 0.0)"]
self.assertIn(self.sql.createTable("test", False, id = "INT", username = ["VARCHAR(255)", "'default_user'"]), table_outputs)
self.assertIn(self.sql.createTable("test_temp", True, value = ["REAL", 0.0], time = ["TIMESTAMP", "CURRENT_TIMESTAMP"]), temp_table_outputs)
# include a Temp table test (False --> True)
def test_dropTable(self):
'''Tests the DROP TABLE SQL generation'''
self.assertEqual(self.sql.dropTable("table_drop"), "DROP TABLE table_drop")
        self.assertEqual(self.sql.dropTable("some_other_table"), "DROP TABLE some_other_table")
def test_renameTable(self):
'''Tests the ALTER TABLE RENAME SQL generation'''
        self.assertEqual(self.sql.rename("orig_table", "new_table"), "ALTER TABLE orig_table RENAME TO new_table")
if __name__ == '__main__':
unittest.main()
|
boada/vpCluster
|
data/boada/analysis_all/MLmethods/plot_massComparison_scatter.py
|
Python
|
mit
| 1,151
| 0.002606
|
from glob import glob
import pylab as pyl
import h5py as hdf
files = glob('ML_predicted_masses*')
# get the power law masses
with hdf.File('../results_cluster.hdf5', 'r') as f:
dset = f[f.keys()[0]]
results = dset.value
# make a figure
f = pyl.figure(figsize=(6, 6 * (pyl.sqrt(5.) - 1.0) / 2.0))
ax = f.add_subplot(111)
i = 0
for f, c, l in zip(files, ['#7a68a6', '#e24a33'],
['$ML_{\sigma, N_{gals}}$, Flat HMF',
                    '$ML_{\sigma, N_{gals}}$']):
if i == 0:
i += 1
continue
with hdf.File(f, 'r') as f1:
dset = f1[f1.keys()[0]]
ML = dset.value
ax.errorbar(results['MASS'],
ML['ML_pred'],
xerr=results['MASS_err'],
yerr=ML['ML_pred_err'],
fmt='o',
color=c,
markersize=10,
label=l) #f.rstrip('.hdf5'))
ax.set_xlabel('Log M$_{pred, PL}$')
ax.set_ylabel('Log M$_{pred, ML}$')
ax.plot([12.5, 15.5], [12.5, 15.5], c='k', zorder=0)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='upper left')
pyl.show()
|
haochi/json-stable-stringify-python
|
tests/test_stringify.py
|
Python
|
mit
| 1,700
| 0.004118
|
import unittest
from .context import json_stable_stringify_python as stringify
class TestStringify(unittest.TestCase):
def test_simple_object(self):
node = {'c':6, 'b': [4,5], 'a': 3, 'z': None}
actual = stringify.stringify(node)
expected = '{"a":3,"b":[4,5],"c":6,"z":null}'
self.assertEqual(actual, expected)
def test_object_with_empty_string(self):
node = {'a': 3, 'z': ''}
actual = stringify.stringify(node)
expected = '{"a":3,"z":""}'
self.assertEqual(actual, expected)
def test_nested_object(self):
node = {
'a': {
'b': {
'c': [1,2,3,None]
}
}
}
actual = stringify.stringify(node)
        expected = '{"a":{"b":{"c":[1,2,3,null]}}}'
self.assertEqual(actual, expected)
def test_array_with_objects(self):
node = [{'z': 1, 'a': 2}]
        actual = stringify.stringify(node)
expected = '[{"a":2,"z":1}]'
self.assertEqual(actual, expected)
def test_nested_array_objects(self):
node = [{'z': [[{'y': 1, 'b': 2}]], 'a': 2}]
actual = stringify.stringify(node)
expected = '[{"a":2,"z":[[{"b":2,"y":1}]]}]'
self.assertEqual(actual, expected)
def test_array_with_none(self):
node = [1, None]
actual = stringify.stringify(node)
expected = '[1,null]'
self.assertEqual(actual, expected)
def test_array_with_empty_string(self):
node = [1, '']
actual = stringify.stringify(node)
expected = '[1,""]'
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
|
dulaccc/Accountant
|
accountant/settings/common.py
|
Python
|
mit
| 5,873
| 0.000851
|
"""
Django settings for accountant project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import sys
from decimal import Decimal
import accounting
VERSION = accounting.VERSION
DISPLAY_VERSION = accounting.get_version()
DISPLAY_SHORT_VERSION = accounting.get_short_version()
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.basename(BASE_DIR)
# Add the BASE_DIR to the path in order to reuse the apps easily
sys.path.append(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o7k)j*lewj6va4yqz=#1^z@6wtf!$#dx(u=z!3(351rc27c9fm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
LOCAL_SERVER = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
SITE_MAIN_DOMAIN = 'example.com'
SITE_MAIN_NAME = 'example.com'
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
'djrill',
'crispy_forms',
'stronghold', # enforce login on the whole app
'avatar', # for user avatars
'allauth',
'allauth.account',
'allauth.socialaccount',
# social providers
# 'allauth.socialaccount.providers.github',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.twitter',
)
# Accounting apps
from accounting import get_apps
LOCAL_APPS = get_apps()
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# Migrations
MIGRATION_MODULES = {
'sites': 'migrations.sites',
'socialaccount': 'migrations.socialaccount',
}
from accounting import ACCOUNTING_MIDDLEWARE_CLASSES
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'stronghold.middleware.LoginRequiredMiddleware',
) + ACCOUNTING_MIDDLEWARE_CLASSES
ROOT_URLCONF = 'accountant.urls'
WSGI_APPLICATION = 'wsgi.application'
# Emailing
DEFAULT_FROM_EMAIL = 'noreply@accountant.fr'
# Templates
# https://docs.djangoproject.com/en/1.7/ref/settings/#template-context-processors
from accounting import ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
) + ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#template-dirs
from accounting import ACCOUNTING_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
ACCOUNTING_MAIN_TEMPLATE_DIR,
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# See: https://docs.djangoproject.com/en/1.7/ref/contrib/staticfiles\
# /#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"djangobower.finders.BowerFinder",
)
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Bower config
BOWER_COMPONENTS_ROOT = os.path.abspath(os.path.join(BASE_DIR, 'components'))
BOWER_INSTALLED_APPS = (
'modernizr',
'jquery',
'bootstrap',
)
# Select 2
AUTO_RENDER_SELECT2_STATICS = False
SELECT2_BOOTSTRAP = True
# Custom User
LOGIN_REDIRECT_URL = 'connect:getting-started'
LOGIN_URL = 'account_login'
# Authentication
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_PASSWORD_VERIFICATION = False
ACCOUNT_USERNAME_REQUIRED = False
# Stronghold
STRONGHOLD_PUBLIC_URLS = (
r'^%s.+$' % STATIC_URL,
r'^%s.+$' % MEDIA_URL,
r'^/accounts/.*$',
)
STRONGHOLD_PUBLIC_NAMED_URLS = (
)
# Forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Accounting
from accounting.defaults import *
|
dcm-oss/blockade
|
blockade/tests/util.py
|
Python
|
apache-2.0
| 1,956
| 0.001022
|
##############################################################################
#
# Copyright Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import time
class Wait(object):
class TimeOutWaitingFor(Exception):
"A test condition timed out"
timeout = 9
wait = .01
def __init__(self, timeout=None, wait=None, exception=None,
getnow=(lambda: time.time), getsleep=(lambda: time.sleep)):
if timeout is not None:
self.timeout = timeout
if wait is not None:
self.wait = wait
if exception is not None:
self.TimeOutWaitingFor = exception
self.getnow = getnow
self.getsleep = getsleep
def __call__(self, func=None, timeout=None, wait=None, message=None):
if func is None:
return lambda func: self(func, timeout, wait, message)
if func():
return
now = self.getnow()
sleep = self.getsleep()
if timeout is None:
timeout = self.timeout
if wait is None:
wait = self.wait
wait = float(wait)
deadline = now() + timeout
while 1:
sleep(wait)
if func():
return
if now() > deadline:
raise self.TimeOutWaitingFor(
message or
getattr(func, '__doc__') or
getattr(func, '__name__')
)
wait = Wait()
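# Illustrative usage: poll a condition until it holds or the timeout expires.
#   wait(lambda: os.path.exists('/tmp/ready'), timeout=5, message='ready file')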
|
ratschlab/ASP
|
examples/undocumented/python_modular/graphical/multiclass_qda.py
|
Python
|
gpl-2.0
| 3,312
| 0.038647
|
"""
Shogun demo
Fernando J. Iglesias Garcia
"""
import numpy as np
import matplotlib as mpl
import pylab
import util
from scipy import linalg
from shogun.Classifier import QDA
from shogun.Features import RealFeatures, MulticlassLabels
# colormap
cmap = mpl.colors.LinearSegmentedColormap('color_classes',
{'red': [(0, 1, 1),
(1, .7, .7)],
'green': [(0, 1, 1),
(1, .7, .7)],
'blue': [(0, 1, 1),
(1, .7, .7)]})
pylab.cm.register_cmap(cmap = cmap)
# Generate data from Gaussian distributions
def gen_data():
np.random.seed(0)
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([-4, 3]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-1, -5]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([3, 4])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def plot_data(qda, X, y, y_pred, ax):
X0, X1, X2 = X[y == 0], X[y == 1], X[y == 2]
# Correctly classified
tp = (y == y_pred)
tp0, tp1, tp2 = tp[y == 0], tp[y == 1], tp[y == 2]
X0_tp, X1_tp, X2_tp = X0[tp0], X1[tp1], X2[tp2]
# Misclassified
X0_fp, X1_fp, X2_fp = X0[tp0 != True], X1[tp1 != True], X2[tp2 != True]
# Class 0 data
pylab.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color = cols[0])
    pylab.plot(X0_fp[:, 0], X0_fp[:, 1], 's', color = cols[0])
m0 = qda.get_mean(0)
pylab.plot(m0[0], m0[1], 'o', color = 'black', markersize = 8)
# Class 1 data
pylab.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color = cols[1])
pylab.plot(X1_fp[:, 0], X1_fp[:, 1], 's', color = cols[1])
m1 = qda.get_mean(1)
pylab.plot(m1[0], m1[1], 'o', color = 'black', markersize = 8)
# Class 2 data
pylab.plot(X2_tp[:, 0], X2_tp[:, 1], 'o', color = cols[2])
pylab.plot(X2_fp[:, 0], X2_fp[:, 1], 's', color = cols[2])
m2 = qda.get_mean(2)
pylab.plot(m2[0], m2[1], 'o', color = 'black', markersize = 8)
def plot_cov(plot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0]) # rad
angle = 180 * angle / np.pi # degrees
# Filled gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2*v[0]**0.5, 2*v[1]**0.5, 180 + angle, color = color)
ell.set_clip_box(plot.bbox)
ell.set_alpha(0.5)
plot.add_artist(ell)
def plot_regions(qda):
nx, ny = 500, 500
x_min, x_max = pylab.xlim()
y_min, y_max = pylab.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
dense = RealFeatures(np.array((np.ravel(xx), np.ravel(yy))))
dense_labels = qda.apply(dense).get_labels()
Z = dense_labels.reshape(xx.shape)
pylab.pcolormesh(xx, yy, Z)
pylab.contour(xx, yy, Z, linewidths = 3, colors = 'k')
# Number of classes
M = 3
# Number of samples of each class
N = 300
# Dimension of the data
dim = 2
cols = ['blue', 'green', 'red']
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.title('Quadratic Discriminant Analysis')
X, y = gen_data()
labels = MulticlassLabels(y)
features = RealFeatures(X.T)
qda = QDA(features, labels, 1e-4, True)
qda.train()
ypred = qda.apply().get_labels()
plot_data(qda, X, y, ypred, ax)
for i in range(M):
plot_cov(ax, qda.get_mean(i), qda.get_cov(i), cols[i])
plot_regions(qda)
pylab.connect('key_press_event', util.quit)
pylab.show()
|
houqp/floyd-cli
|
floyd/client/version.py
|
Python
|
apache-2.0
| 580
| 0
|
from floyd.client.base import FloydHttpClient
from floyd.model.version import CliVersion
from floyd.log import logger as floyd_logger
class VersionClient(FloydHttpClient):
"""
Client to get API version from the server
"""
def __init__(self):
self.url = "/cli_version"
super(VersionClient, self).__init__(skip_auth=True)
def get_cli_version(self):
response = self.request("GET", self.url)
data_dict = response.json()
floyd_logger.debug("CLI Version info: %s", data_dict)
return CliVersion.from_dict(data_dict)
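# --- Illustrative usage (a sketch; assumes a reachable Floyd API server) ---
if __name__ == '__main__':
    client = VersionClient()
    print(client.get_cli_version())  # CliVersion built from the /cli_version payload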
|
Ymir-RPG/ymir-api
|
run.py
|
Python
|
apache-2.0
| 70
| 0
|
from ymir import app
app.run(debug=True, host='0.0.0.0', port=2841)
|
eLRuLL/scrapy
|
scrapy/__init__.py
|
Python
|
bsd-3-clause
| 1,151
| 0.007819
|
"""
Scrapy - a web crawling and web scraping framework written for Python
"""
__all__ = ['__version__', 'version_info', 'twisted_version',
'Spider', 'Request', 'FormRequest', 'Selector', 'Item', 'Field']
# Scrapy version
import pkgutil
__version__ = pkgutil.get_data(__package__, 'VERSION').decode('ascii').strip()
version_info = tuple(int(v) if v.isdigit() else v
for v in __version__.split('.'))
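# For example, __version__ == '1.8.0' yields version_info == (1, 8, 0);
# non-numeric parts such as the '0dev2' in '2.0.0dev2' are kept as strings.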
del pkgutil
# Check minimum required Python version
import sys
if sys.version_info < (3, 5):
print("Scrapy %s req
|
uires Python 3.5" % __version__)
sys.exit(1)
# Ignore noisy twisted deprecation warnings
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')
del warnings
# Apply monkey patches to fix issues in external libraries
from scrapy import _monkeypatches
del _monkeypatches
from twisted import version as _txv
twisted_version = (_txv.major, _txv.minor, _txv.micro)
# Declare top-level shortcuts
from scrapy.spiders import Spider
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
from scrapy.item import Item, Field
del sys
|
mivanov-utwente/t4proj
|
t4proj/apps/stats/models.py
|
Python
|
bsd-2-clause
| 481
| 0.002079
|
from django.db.models import Transform
from django.db.models import DateTimeField, TimeField
from django.utils.functional import cached_property
class TimeValue(Transform):
lookup_name = 'time'
    function = 'time'
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'TIME({})'.format(lhs), params
@cached_property
def output_field(self):
return TimeField()
DateTimeField.register_lookup(TimeValue)
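# --- Illustrative usage (hypothetical ``Event`` model; a sketch, not part of this file) ---
# Registering the transform gives every DateTimeField a ``time`` lookup path that
# compares only the time-of-day component, e.g.:
#
#     import datetime
#     Event.objects.filter(created__time__gte=datetime.time(9, 0))
#
# which compiles to roughly ``WHERE TIME("created") >= '09:00:00'``.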
|
trabacus-softapps/docker-edumedia
|
additional_addons/Edumedia_India/ed_sale.py
|
Python
|
agpl-3.0
| 56,005
| 0.017034
|
from openerp.osv import fields,osv
import time
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp import pooler
from openerp import netsvc
import base64
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.Edumedia_India import config
class sale_order(osv.osv):
def history(self, cr, uid, cases, keyword, history=False, subject=None, email=False, details=None, email_from=False, message_id=False, attach=[], context=None):
mailgate_pool = self.pool.get('mailgate.thread')
return mailgate_pool.history(cr, uid, cases, keyword, history=history,\
subject=subject, email=email, \
details=details, email_from=email_from,\
message_id=message_id, attach=attach, \
context=context)
def _get_partner_default_addr(self, cr, uid, ids, name, arg, context=None):
res = {}
for case in self.browse(cr, uid, ids, context=context):
addr = self.pool.get('res.partner').address_get(cr, uid, [case.partner_id.id], ['default'])
res[case.id] = addr['default']
return res
# def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
# return super(sale_order,self)._amount_all(cr, uid, ids,field_name,arg,context=context)
def _get_class_details(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for case in self.browse(cr, uid, ids, context=context):
res[case.id] = {
'tot_class': 0, 'low_std': 0, 'high_std': 0, 'tot_student' : 0, 'tot_sectn':0 }
cnt_class = l_std = h_std = studnts = sectns = 0
class_std = []
if case.class_ids:
for line in case.class_ids:
cnt_class += 1
class_std.append(line.ed_class)
studnts += line.ed_students
sectns += line.ed_sec
if class_std:
l_std = min(class_std)
h_std = max(class_std)
res[case.id]['tot_class'] = cnt_class
res[case.id]['low_std'] = l_std
res[case.id]['high_std'] = h_std
res[case.id]['tot_student'] = studnts
res[case.id]['tot_sectn'] = sectns
return res
# def _get_order(self, cr, uid, ids, context=None):
# result = {}
# for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
# result[line.order_id.id] = True
# return result.keys()
def _get_delivry_ids(self, cr, uid, ids, field_name, arg, context=None):
delivry_obj = self.pool.get("stock.picking")
res = {}
for case in self.browse(cr,uid,ids,context):
res[case.id] = delivry_obj.search(cr, uid, [('sale_id', '=', case.id),('state','=','done')])
return res
_inherit='sale.order'
_columns={
# Overridden
'product_id': fields.many2one('product.product', 'Product', change_default=True,states={'draft': [('readonly', False)]}),
# 'amount_untaxed': fields.function(_amount_all, method=True, digits_compute= dp.get_precision('Sale Price'), string='Untaxed Amount',
# store = {
# 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
# 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
# },
# multi='sums', help="The amount without tax."),
# 'amount_tax': fields.function(_amount_all, method=True, digits_compute= dp.get_precision('Sale Price'), string='Taxes',
# store = {
# 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
# 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
# },
# multi='sums', help="The tax amount."),
# 'amount_total': fields.function(_amount_all, method=True, digits_compute= dp.get_precision('Sale Price'), string='Total',
# store = {
# 'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
# 'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
# },
# multi='sums', help="The total amount."),
'state': fields.selection([
('draft', 'Quotation'),
# ('waiting_date', 'Waiting Schedule'),
# ('proposal_sent', 'Proposal Sent'),
# ('proposal_accept','Proposal Accepted'),
('manual', 'Manual In Progress'),
('progress', 'In Progress'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled'),
],'State',readonly=True),
# Extra Fields
'films_only':fields.boolean('Film License Only',readonly=True ,states={'draft': [('readonly', False)]}),
'address_ids':fields.many2many('res.partner','address_sale_rel','sale_id','address_id','Coordinator Details'),
'class_ids':fields.one2many('ed.class.details','sale_id','Class Details'),
'cap1_terms' : fields.char('Caption 1',size=100),
'cap1_text':fields.text('Caption Text',size=500),
'cap2_terms' : fields.char('Caption 2',size=100),
'cap2_text':fields.text('Caption Text',size=500),
'cap3_terms' : fields.char('Caption 3',size=100),
'cap3_text':fields.text('Caption Text',size=500),
'cap4_terms' : fields.char('Caption 4',size=100),
'cap4_text':fields.text('Caption Text',size=500),
        'ed_type':fields.selection([('so','Sale Order'),('crm','CRM')],'Type'),
'ed_license':fields.selection(config.CLASS_STD,'License',readonly=True ,states={'draft': [('readonly', False)]}),
'rsn_reject' : fields.text('Relationship Manager Remarks',readonly=True ,states={'draft': [('readonly', False)]}),
'ed_proj':fields.char('Project',size=100),
'ed_cdd':fields.integer('No.Of.CDD',readonly=True ,states={'draft': [('readonly', False)]}),
'ed_rate':fields.integer('Rate',readonly=True ,states={'draft': [('readonly', False)]}),
'license_rate':fields.integer('Rate',readonly=True ,states={'draft': [('readonly', False)]}),
'nxt_payment_date' : fields.date('Next Payment Date'),
'licen_stdate' : fields.date('Start Date',readonly=True ,states={'draft': [('readonly', False)]}),
'licen_eddate' : fields.date('End Date',readonly=True ,states={'draft': [('readonly', False)]}),
'invoice_id' : fields.many2one('account.invoice','Invoice No',readonly=True),
'training_ids':f
|
booski/hostdb9
|
dns_reader.py
|
Python
|
gpl-2.0
| 1,853
| 0.001619
|
# coding=utf-8
import sys
def read(client, vlans, cnames, print_warnings):
lines = []
if cnames:
for cname in client.list_cnames():
lines.append('cname\t' + cname['name'])
lines.append('target\t' + cname['canonical'])
lines.append('')
for vlan in client.list_vlans():
net = vlan['network']
if net not in vlans:
continue
lines.append('')
lines.append('network\t' + net)
for ip in client.list_vlan_ips(net):
types = ip['types']
addr = ip['ip_address']
lines.append('')
if types and 'HOST' not in types:
lines.append('host\t' + addr + '\t# in use as: '+', '.join(types))
continue
lines.append('host\t' + addr)
names = ip['names']
name = ''
extra = ''
if len(names) > 0:
name = names[0]
            if len(names) > 1:
extra = '\t# additional names: ' + ', '.join(names[1:])
if print_warnings:
print('Warning! '+ addr + ' has several names. '
+ 'Adding extra names as file comment.',
file=sys.stderr)
if name:
append_line = 'name\t' + name
            if extra:
append_line += extra
lines.append(append_line)
(comment, aliases) = client.get_host_info(name)
if comment:
lines.append('comment\t' + comment)
for alias in aliases:
lines.append('alias\t' + alias)
mac = ip['mac_address']
if mac and not name.startswith('dhcp'):
lines.append('mac\t' + mac)
return lines
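# --- Illustrative usage (hypothetical ``client`` object; a sketch, not part of the module) ---
# ``read`` only needs an object exposing list_cnames(), list_vlans(),
# list_vlan_ips(network) and get_host_info(name); it returns the flat
# config lines ready to be written out:
#
#     lines = read(client, vlans=['10.0.0.0/24'], cnames=True, print_warnings=True)
#     sys.stdout.write('\n'.join(lines) + '\n')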
|
Azure/azure-sdk-for-python
|
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/aio/operations/_managed_clusters_operations.py
|
Python
|
mit
| 54,887
| 0.005265
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_clusters_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_access_profile_request, build_get_request, build_get_upgrade_profile_request, build_list_by_resource_group_request, build_list_cluster_admin_credentials_request, build_list_cluster_monitoring_user_credentials_request, build_list_cluster_user_credentials_request, build_list_request, build_reset_aad_profile_request_initial, build_reset_service_principal_profile_request_initial, build_rotate_cluster_certificates_request_initial, build_update_tags_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ManagedClusterListResult"]:
"""Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_04_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterListResult", pipeline_response)
list_of_elem =
|
bgroff/kala-app
|
django_kala/api/basecamp_classic/companies/parsers.py
|
Python
|
mit
| 2,236
| 0
|
"""
Provides XML parsing support.
"""
from django.conf import settings
from rest_framework.exceptions import ParseError
from rest_framework.parsers import BaseParser
import defusedxml.ElementTree as etree
class XMLCompanyParser(BaseParser):
"""
XML company parser.
"""
media_type = 'application/xml'
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as XML and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
parser = etree.DefusedXMLParser(encoding=encoding)
try:
tree = etree.parse(stream, parser=parser, forbid_dtd=True)
except (etree.ParseError, ValueError) as exc:
raise ParseError(detail=str(exc))
data = self._xml_convert(tree.getroot())
return data
def _xml_convert(self, element):
"""
convert the xml `element` into the corresponding python object
"""
data = {}
fields = list(element)
for field in fields:
if field.tag == 'id':
data['id'] = str(field.text)
if field.tag == 'name':
data['name'] = str(field.text)
if field.tag == 'web-address':
data['website'] = str(field.text)
if field.tag == 'phone-number-office':
data['phone'] = str(field.text)
if field.tag == 'phone-number-fax':
data['fax'] = str(field.text)
if field.tag == 'address-one':
data['address'] = str(field.text)
if field.tag == 'address-two':
data['address1'] = str(field.text)
            if field.tag == 'city':
data['city'] = str(field.text)
if field.tag == 'state':
                data['state'] = str(field.text)
if field.tag == 'country':
data['country'] = str(field.text)
if field.tag == 'time-zone-id':
data['timezone'] = str(field.text)
if field.tag == 'locale':
data['locale'] = str(field.text)
return data
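# --- Illustrative usage (a sketch, not part of the original module) ---
# Feed the parser a byte stream; Django settings must be configured so that
# settings.DEFAULT_CHARSET resolves (configure() falls back to the defaults).
if __name__ == '__main__':
    import io
    if not settings.configured:
        settings.configure()  # DEFAULT_CHARSET defaults to 'utf-8'
    sample = b'<company><id>1</id><name>Acme</name><city>Springfield</city></company>'
    print(XMLCompanyParser().parse(io.BytesIO(sample)))
    # -> {'id': '1', 'name': 'Acme', 'city': 'Springfield'}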
|
PAIR-code/interpretability
|
text-dream/python/helpers/setup_helper.py
|
Python
|
apache-2.0
| 3,457
| 0.010703
|
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides the setup for the experiments."""
from pytorch_pretrained_bert import modeling
from pytorch_pretrained_bert import tokenization
import torch
import embeddings_helper
def setup_uncased(model_config):
"""Setup the uncased bert model.
Args:
model_config: The model configuration to be loaded.
Returns:
tokenizer: The tokenizer to be used to convert between tokens and ids.
model: The model that has been initialized.
device: The device to be used in this run.
embedding_map: Holding all token embeddings.
"""
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained(model_config)
# Load pre-trained model (weights)
model = modeling.BertModel.from_pretrained(model_config)
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device : ', device)
model = model.to(device)
# Initialize the embedding map
embedding_map = embeddings_helper.EmbeddingMap(device, model)
return tokenizer, model, device, embedding_map
def setup_bert_vanilla(model_config):
"""Setup the uncased bert model without embedding maps.
Args:
model_config: The model configuration to be loaded.
Returns:
tokenizer: The tokenizer to be used to convert between tokens and ids.
model: The model that has been initialized.
device: The device to be used in this run.
"""
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained(model_config)
# Load pre-trained model (weights)
model = modeling.BertModel.from_pretrained(model_config)
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device : ', device)
model = model.to(device)
return tokenizer, model, device
def setup_bert_mlm(model_config):
"""Setup the uncased bert model with classification head.
Args:
model_config: The model configuration to be loaded.
Returns:
tokenizer: The tokenizer to be used to convert between tokens and ids.
model: The model that has been initialized.
    device: The device to be used in this run.
    embedding_map: Holding all token embeddings.
"""
# Load pre-trained model tokenizer (vocabulary)
  tokenizer = tokenization.BertTokenizer.from_pretrained(model_config)
# Load pre-trained model (weights)
model = modeling.BertForMaskedLM.from_pretrained('bert-base-uncased')
_ = model.eval()
# Set up the device in use
  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device : ', device)
model = model.to(device)
# Initialize the embedding map
embedding_map = embeddings_helper.EmbeddingMap(device, model.bert)
return tokenizer, model, device, embedding_map
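# --- Illustrative usage (a sketch; downloads pretrained weights on first run) ---
if __name__ == '__main__':
  tokenizer, model, device, embedding_map = setup_uncased('bert-base-uncased')
  tokens = tokenizer.tokenize('interpretability is fun')
  print(tokens, tokenizer.convert_tokens_to_ids(tokens))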
|
md1024/rams
|
uber/tests/models/test_attendee.py
|
Python
|
agpl-3.0
| 16,952
| 0.001829
|
from uber.tests import *
class TestCosts:
@pytest.fixture(autouse=True)
def mocked_prices(self, monkeypatch):
monkeypatch.setattr(c, 'get_oneday_price', Mock(return_value=10))
monkeypatch.setattr(c, 'get_attendee_price', Mock(return_value=20))
def test_badge_cost(self):
assert 10 == Attendee(badge_type=c.ONE_DAY_BADGE).badge_cost
assert 20 == Attendee().badge_cost
assert 30 == Attendee(overridden_price=30).badge_cost
assert 0 == Attendee(paid=c.NEED_NOT_PAY).badge_cost
assert 20 == Attendee(paid=c.PAID_BY_GROUP).badge_cost
def test_total_cost(self):
assert 20 == Attendee().total_cost
assert 25 == Attendee(amount_extra=5).total_cost
def test_amount_unpaid(self, monkeypatch):
monkeypatch.setattr(Attendee, 'total_cost', 50)
assert 50 == Attendee().amount_unpaid
assert 10 == Attendee(amount_paid=40).amount_unpaid
assert 0 == Attendee(amount_paid=50).amount_unpaid
assert 0 == Attendee(amount_paid=51).amount_unpaid
def test_age_discount(self, monkeypatch):
monkeypatch.setattr(Attendee, 'age_group_conf', {'discount': 5})
assert 15 == Attendee().total_cost
assert 20 == Attendee(amount_extra=5).total_cost
assert 10 == Attendee(overridden_price=10).total_cost
assert 15 == Attendee(overridden_price=10, amount_extra=5).total_cost
def test_age_free(self, monkeypatch):
monkeypatch.setattr(Attendee, 'age_group_conf', {'discount': 999}) # makes badge_cost free unless overridden_price is set
assert 0 == Attendee().total_cost
assert 5 == Attendee(amount_extra=5).total_cost
assert 10 == Attendee(overridden_price=10).total_cost
assert 15 == Attendee(overridden_price=10, amount_extra=5).total_cost
def test_is_unpaid():
assert Attendee().is_unpaid
    assert Attendee(paid=c.NOT_PAID).is_unpaid
for status in [c.NEED_NOT_PAY, c.PAID_BY_GROUP, c.REFUNDED]:
assert not Attendee(paid=status).is_unpaid
# we may eventually want to make this a little more explicit;
# at the moment I'm basically just testing an implementation detail
def test_is_unassigned():
assert Attendee().is_unassigned
assert not Attendee(first_name='x').is_unassigned
def test_is_dealer():
    assert not Attendee().is_dealer
assert Attendee(ribbon=c.DEALER_RIBBON).is_dealer
assert Attendee(badge_type=c.PSEUDO_DEALER_BADGE).is_dealer
# not all attendees in a dealer group are necessarily dealers
dealer_group = Group(tables=1)
assert not Attendee(group=dealer_group).is_dealer
def test_is_dept_head():
assert not Attendee().is_dept_head
assert Attendee(ribbon=c.DEPT_HEAD_RIBBON).is_dept_head
def test_unassigned_name(monkeypatch):
monkeypatch.setattr(Attendee, 'badge', 'BadgeType')
assert not Attendee().unassigned_name
assert not Attendee(group_id=1, first_name='x').unassigned_name
assert '[Unassigned BadgeType]' == Attendee(group_id=1).unassigned_name
def test_full_name(monkeypatch):
assert 'x y' == Attendee(first_name='x', last_name='y').full_name
monkeypatch.setattr(Attendee, 'unassigned_name', 'xxx')
assert 'xxx' == Attendee(first_name='x', last_name='y').full_name
def test_last_first(monkeypatch):
assert 'y, x' == Attendee(first_name='x', last_name='y').last_first
monkeypatch.setattr(Attendee, 'unassigned_name', 'xxx')
assert 'xxx' == Attendee(first_name='x', last_name='y').last_first
def test_badge():
assert Attendee().badge == 'Unpaid Attendee'
assert Attendee(paid=c.HAS_PAID).badge == 'Attendee'
assert Attendee(badge_num=123).badge == 'Unpaid Attendee'
assert Attendee(badge_num=123, paid=c.HAS_PAID).badge == 'Attendee #123'
assert Attendee(ribbon=c.VOLUNTEER_RIBBON).badge == 'Unpaid Attendee (Volunteer)'
def test_is_transferable(monkeypatch):
assert not Attendee(paid=c.HAS_PAID).is_transferable
monkeypatch.setattr(Attendee, 'is_new', False)
assert Attendee(paid=c.HAS_PAID).is_transferable
assert Attendee(paid=c.PAID_BY_GROUP).is_transferable
assert not Attendee(paid=c.NOT_PAID).is_transferable
assert not Attendee(paid=c.HAS_PAID, checked_in=datetime.now(UTC)).is_transferable
assert not Attendee(paid=c.HAS_PAID, badge_type=c.STAFF_BADGE).is_transferable
assert not Attendee(paid=c.HAS_PAID, badge_type=c.GUEST_BADGE).is_transferable
def test_is_not_transferable_trusted(monkeypatch):
monkeypatch.setattr(Attendee, 'is_new', False)
assert not Attendee(paid=c.HAS_PAID, trusted_depts=c.CONSOLE).is_transferable
def test_trusted_somewhere():
assert Attendee(trusted_depts='{},{}'.format(c.ARCADE, c.CONSOLE)).trusted_somewhere
assert Attendee(trusted_depts=str(c.CONSOLE)).trusted_somewhere
assert not Attendee(trusted_depts='').trusted_somewhere
def test_has_personalized_badge():
assert not Attendee().has_personalized_badge
assert Attendee(badge_type=c.STAFF_BADGE).has_personalized_badge
assert Attendee(badge_type=c.SUPPORTER_BADGE).has_personalized_badge
for badge_type in [c.ATTENDEE_BADGE, c.ONE_DAY_BADGE, c.GUEST_BADGE]:
assert not Attendee(badge_type=badge_type).has_personalized_badge
def test_takes_shifts():
assert not Attendee().takes_shifts
assert not Attendee(staffing=True).takes_shifts
assert Attendee(staffing=True, assigned_depts=c.CONSOLE).takes_shifts
assert not Attendee(staffing=True, assigned_depts=c.CON_OPS).takes_shifts
assert Attendee(staffing=True, assigned_depts=','.join(map(str, [c.CONSOLE, c.CON_OPS]))).takes_shifts
class TestUnsetVolunteer:
def test_basic(self):
a = Attendee(staffing=True, trusted_depts=c.CONSOLE, requested_depts=c.CONSOLE, assigned_depts=c.CONSOLE, ribbon=c.VOLUNTEER_RIBBON, shifts=[Shift()])
a.unset_volunteering()
assert not a.staffing and not a.trusted_somewhere and not a.requested_depts and not a.assigned_depts and not a.shifts and a.ribbon == c.NO_RIBBON
def test_different_ribbon(self):
a = Attendee(ribbon=c.DEALER_RIBBON)
a.unset_volunteering()
assert a.ribbon == c.DEALER_RIBBON
def test_staff_badge(self, monkeypatch):
with Session() as session:
monkeypatch.setattr(Attendee, 'session', Mock())
a = Attendee(badge_type=c.STAFF_BADGE, badge_num=123)
a.unset_volunteering()
assert a.badge_type == c.ATTENDEE_BADGE and a.badge_num is None
def test_affiliate_with_extra(self):
a = Attendee(affiliate='xxx', amount_extra=1)
a._misc_adjustments()
assert a.affiliate == 'xxx'
def test_affiliate_without_extra(self):
a = Attendee(affiliate='xxx')
a._misc_adjustments()
assert a.affiliate == ''
def test_amount_refunded_when_refunded(self):
a = Attendee(amount_refunded=123, paid=c.REFUNDED)
a._misc_adjustments()
assert a.amount_refunded == 123
def test_amount_refunded_when_not_refunded(self):
a = Attendee(amount_refunded=123)
a._misc_adjustments()
assert not a.amount_refunded
def test_badge_precon(self):
a = Attendee(badge_num=1)
a._misc_adjustments()
assert not a.checked_in
def test_badge_at_con(self, monkeypatch, at_con):
a = Attendee()
a._misc_adjustments()
assert not a.checked_in
a = Attendee(badge_num=1)
a._misc_adjustments()
assert a.checked_in
monkeypatch.setattr(Attendee, 'is_new', False)
a = Attendee(badge_num=1)
a._misc_adjustments()
assert a.checked_in
def test_names(self):
a = Attendee(first_name='nac', last_name='mac Feegle')
a._misc_adjustments()
assert a.full_name == 'Nac mac Feegle'
a = Attendee(first_name='NAC', last_name='mac feegle')
a._misc_adjustments()
assert a.full_name == 'Nac Mac Feegle'
class TestStaffingAdjustments:
@pytest.fixture(autouse=True)
def unset_volunteering(self, monkeypatch):
monkeypatch.setattr(Attendee, 'unset_volunteering', Mock())
return Attendee.un
|
gst/amqpy
|
amqpy/consumer.py
|
Python
|
mit
| 2,846
| 0.000351
|
from abc import ABCMeta, abstractmethod
class AbstractConsumer(metaclass=ABCMeta):
"""
This class provides facilities to create and manage queue consumers. To
create a consumer, subclass this class and override the :meth:`run`
method. Then, instantiate the class with the desired parameters and call
:meth:`declare` to declare the consumer to the server.
Example::
class Consumer(AbstractConsumer):
def run(self, msg: Message):
print('Received message: {}'.format(msg.body))
msg.ack()
c1 = Consumer(ch, 'test.q')
c1.declare()
conn.drain_events()
"""
def __init__(self, channel, queue, consumer_tag='', no_local=False,
no_ack=False, exclusive=False):
"""
:param channel: channel
:type channel: amqpy.channel.Channel
:param str queue: queue
:param str consumer_tag: consumer tag, local to the connection; leave
blank to let server auto-assign a tag
:param bool no_local: if True: do not deliver own messages
:param bool no_ack: server will not expect an ack for each message
:param bool exclusive: request exclusive access
"""
self.channel = channel
self.queue = queue
self.consumer_tag = consumer_tag
self.no_local = no_local
self.no_ack = no_ack
self.exclusive = exclusive
#: Number of messages consumed (incremented automatically)
self.consume_count = 0
def declare(self):
"""Declare the consumer
        This method calls :meth:`~amqpy.channel.Channel.basic_consume()`
internally.
After the queue consumer is created, :attr:`self.consumer_tag` is
set to the server-assigned consumer tag if a tag was not specified
initially.
"""
self.consumer_tag = self.channel.basic_consume(
            self.queue, self.consumer_tag, self.no_local, self.no_ack, self.exclusive,
callback=self.start, on_cancel=self.cancel_cb)
def cancel(self):
"""Cancel the consumer
"""
self.channel.basic_cancel(self.consumer_tag)
@abstractmethod
def run(self, msg):
"""Consumer callback
This method is called when the consumer is delivered a message. This
method **must** be overridden in the subclass.
:param msg: received message
:type msg: amqpy.message.Message
"""
pass
def cancel_cb(self, consumer_tag):
"""Consumer cancel callback
This method is called when the consumer is cancelled. This method may
be overridden in the subclass.
:param str consumer_tag: consumer tag
"""
pass
def start(self, msg):
self.run(msg)
self.consume_count += 1
|
DavidYen/YEngine
|
ypy/path_help.py
|
Python
|
mit
| 129
| 0.03876
|
import os
def NormalizedJoin( *args ):
"Normalizes and joins directory nam
|
es"
return os.path.normpath(os.path.join(*args))
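# Illustrative usage (a sketch, not part of the original file):
#   NormalizedJoin('src', 'ypy', '..', 'path_help.py')  # -> 'src/path_help.py'
#   (os.path.normpath yields backslash separators on Windows)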
|
stweil/letsencrypt
|
certbot-dns-route53/tests/dns_route53_test.py
|
Python
|
apache-2.0
| 9,471
| 0.00095
|
"""Tests for certbot_dns_route53._internal.dns_route53.Authenticator"""
import unittest
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
class AuthenticatorTest(unittest.TestCase, dns_test_common.BaseAuthenticatorTest):
# pylint: disable=protected-access
def setUp(self):
from certbot_dns_route53._internal.dns_route53 import Authenticator
super().setUp()
self.config = mock.MagicMock()
# Set up dummy credentials for testing
os.environ["AWS_ACCESS_KEY_ID"] = "dummy_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret_access_key"
self.auth = Authenticator(self.config, "route53")
def tearDown(self):
# Remove the dummy credentials from env vars
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
def test_perform(self):
self.auth._change_txt_record = mock.MagicMock()
self.auth._wait_for_change = mock.MagicMock()
self.auth.perform([self.achall])
self.auth._change_txt_record.assert_called_once_with("UPSERT",
'_acme-challenge.' + DOMAIN,
mock.ANY)
self.assertEqual(self.auth._wait_for_change.call_count, 1)
def test_perform_no_credentials_error(self):
self.auth._change_txt_record = mock.MagicMock(side_effect=NoCredentialsError)
self.assertRaises(errors.PluginError,
self.auth.perform,
[self.achall])
def test_perform_client_error(self):
self.auth._change_txt_record = mock.MagicMock(
side_effect=ClientError({"Error": {"Code": "foo"}}, "bar"))
self.assertRaises(errors.PluginError,
self.auth.perform,
[self.achall])
def test_cleanup(self):
self.auth._attempt_cleanup = True
self.auth._change_txt_record = mock.MagicMock()
self.auth.cleanup([self.achall])
self.auth._change_txt_record.assert_called_once_with("DELETE",
'_acme-challenge.'+DOMAIN,
mock.ANY)
def test_cleanup_no_credentials_error(self):
self.auth._attempt_cleanup = True
self.auth._change_txt_record = mock.MagicMock(side_effect=NoCredentialsError)
self.auth.cleanup([self.achall])
def test_cleanup_client_error(self):
self.auth._attempt_cleanup = True
self.auth._change_txt_record = mock.MagicMock(
side_effect=ClientError({"Error": {"Code": "foo"}}, "bar"))
self.auth.cleanup([self.achall])
class ClientTest(unittest.TestCase):
# pylint: disable=protected-access
PRIVATE_ZONE = {
"Id": "BAD-PRIVATE",
"Name": "example.com",
"Config": {
"PrivateZone": True
}
}
EXAMPLE_NET_ZONE = {
"Id": "BAD-WRONG-TLD",
"Name": "example.net",
"Config": {
"PrivateZone": False
}
}
EXAMPLE_COM_ZONE = {
"Id": "EXAMPLE",
"Name": "example.com",
"Config": {
"PrivateZone": False
}
}
FOO_EXAMPLE_COM_ZONE = {
"Id": "FOO",
"Name": "foo.example.com",
"Config": {
"PrivateZone": False
}
}
def setUp(self):
from certbot_dns_route53._internal.dns_route53 import Authenticator
self.config = mock.MagicMock()
# Set up dummy credentials for testing
os.environ["AWS_ACCESS_KEY_ID"] = "dummy_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret_access_key"
self.client = Authenticator(self.config, "route53")
def tearDown(self):
# Remove the dummy credentials from env vars
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
def test_find_zone_id_for_domain(self):
self.client.r53.get_paginator = mock.MagicMock()
self.client.r53.get_paginator().paginate.return_value = [
{
"HostedZones": [
self.EXAMPLE_NET_ZONE,
self.EXAMPLE_COM_ZONE,
]
}
]
result = self.client._find_zone_id_for_domain("foo.example.com")
self.assertEqual(result, "EXAMPLE")
def test_find_zone_id_for_domain_pagination(self):
self.client.r53.get_paginator = mock.MagicMock()
self.client.r53.get_paginator().paginate.return_value = [
{
"HostedZones": [
self.PRIVATE_ZONE,
self.EXAMPLE_COM_ZONE,
]
},
{
"HostedZones": [
self.PRIVATE_ZONE,
self.FOO_EXAMPLE_COM_ZONE,
]
}
]
result = self.client._find_zone_id_for_domain("foo.example.com")
self.assertEqual(result, "FOO")
def test_find_zone_id_for_domain_no_results(self):
self.client.r53.get_paginator = mock.MagicMock()
self.client.r53.get_paginator().paginate.return_value = []
self.assertRaises(errors.PluginError,
self.client._find_zone_id_for_domain,
"foo.example.com")
def test_find_zone_id_for_domain_no_correct_results(self):
self.client.r53.get_paginator = mock.MagicMock()
self.client.r53.get_paginator().paginate.return_value = [
{
"HostedZones": [
self.PRIVATE_ZONE,
self.EXAMPLE_NET_ZONE,
]
},
]
self.assertRaises(errors.PluginError,
self.client._find_zone_id_for_domain,
"foo.example.com")
def test_change_txt_record(self):
self.client._find_zone_id_for_domain = mock.MagicMock()
self.client.r53.change_resource_record_sets = mock.MagicMock(
return_value={"ChangeInfo": {"Id": 1}})
self.client._change_txt_record("FOO", DOMAIN, "foo")
call_count = self.client.r53.change_resource_record_sets.call_count
self.assertEqual(call_count, 1)
def test_change_txt_record_delete(self):
self.client._find_zone_id_for_domain = mock.MagicMock()
self.client.r53.change_resource_record_sets = mock.MagicMock(
return_value={"ChangeInfo": {"Id": 1}})
validation = "some-value"
validation_record = {"Value": '"{0}"'.format(validation)}
self.client._resource_records[DOMAIN] = [validation_record]
self.client._change_txt_record("DELETE", DOMAIN, validation)
call_count = self.client.r53.change_resource_record_sets.call_count
self.assertEqual(call_count, 1)
call_args = self.client.r53.change_resource_record_sets.call_args_list[0][1]
call_args_batch = call_args["ChangeBatch"]["Changes"][0]
self.assertEqual(call_args_batch["Action"], "DELETE")
self.assertEqual(
call_args_batch["ResourceRecordSet"]["ResourceRecords"],
[validation_record])
def test_change_txt_record_multirecord(self):
self.client._find_zone_id_for_domain = mock.MagicMock()
|
data-tsunami/museo-cachi
|
cachi/migrations/0008_auto__del_field_piezaconjunto_fragmentos.py
|
Python
|
gpl-3.0
| 15,336
| 0.007303
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'PiezaConjunto.fragmentos'
db.delete_column(u'cachi_piezaconjunto', 'fragmentos')
def backwards(self, orm):
# Adding field 'PiezaConjunto.fragmentos'
db.add_column(u'cachi_piezaconjunto', 'fragmentos',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cachi.adjunto': {
'Meta': {'object_name': 'Adjunto'},
'adjunto': ('django.db.models.fields.files.FileField', [], {'max_length': '768'}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'ficha_relevamiento_sitio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cachi.FichaRelevamientoSitio']", 'null': 'True', 'blank': 'True'}),
'ficha_tecnica': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'adjunto_ficha_tecnica'", 'null': 'True', 'to': u"orm['cachi.FichaTecnica']"}),
'fragmento': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'adjunto_fragmento'", 'null': 'True', 'to': u"orm['cachi.Fragmento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'informe_campo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cachi.InformeCampo']", 'null': 'True', 'blank': 'True'}),
'nombre_archivo': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'pieza_conjunto': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'adjunto_pieza_conjunto'", 'null': 'True', 'to': u"orm['cachi.PiezaConjunto']"}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'cachi.ficharelevamientositio': {
'Meta': {'object_name': 'FichaRelevamientoSitio'},
'adjuntos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cachi.Adjunto']", 'null': 'True', 'blank': 'True'}),
'autor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cachi.Persona']"}),
'fecha': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'cachi.fichatecnica': {
'Meta': {'object_name': 'FichaTecnica'},
'alto': ('django.db.models.fields.PositiveIntegerField', [], {}),
'autor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cachi.Persona']", 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'decoracion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desperfectos': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desperfectos_fabricacion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'diagnostico_estado': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'diametro_max': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'diametro_min': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'espesor': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'fecha': ('django.db.models.fields.DateField', [], {}),
            'fragmento': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fichas_tecnicas'", 'to': u"orm['cachi.Fragmento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inscripciones_marcas': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'observacion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'otras_caracteristicas_distintivas': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'peso': ('django.db.models.fields.PositiveIntegerField', [], {}),
'razon_actualizacion': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reparaciones': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tratamiento': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'usuario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cachi.fragmento': {
'Meta': {'object_name': 'Fragmento'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numero_inventario': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pieza_conjunto': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fragmentos_pieza_conjunto'", 'to': u"orm['cachi.PiezaConjunto']"}),
'ultima_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'ultima_version'", 'n
|
forkbong/qutebrowser
|
qutebrowser/utils/usertypes.py
|
Python
|
gpl-3.0
| 16,178
| 0.000124
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Custom useful data types."""
import operator
import enum
import dataclasses
from typing import Any, Optional, Sequence, TypeVar, Union
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QTimer
from PyQt5.QtCore import QUrl
from qutebrowser.utils import log, qtutils, utils
_T = TypeVar('_T', bound=utils.SupportsLessThan)
class Unset:
"""Class for an unset object."""
__slots__ = ()
def __repr__(self) -> str:
return '<UNSET>'
UNSET = Unset()
class NeighborList(Sequence[_T]):
"""A list of items which saves its current position.
Class attributes:
Modes: Different modes, see constructor documentation.
Attributes:
fuzzyval: The value which is currently set but not in the list.
_idx: The current position in the list.
_items: A list of all items, accessed through item property.
_mode: The current mode.
"""
class Modes(enum.Enum):
"""Behavior for the 'mode' argument."""
edge = enum.auto()
exception = enum.auto()
def __init__(self, items: Sequence[_T] = None,
default: Union[_T, Unset] = UNSET,
mode: Modes = Modes.exception) -> None:
"""Constructor.
Args:
items: The list of items to iterate in.
_default: The initially selected value.
_mode: Behavior when the first/last item is reached.
Modes.edge: Go to the first/last item
Modes.exception: Raise an IndexError.
"""
if not isinstance(mode, self.Modes):
raise TypeError("Mode {} is not a Modes member!".format(mode))
if items is None:
self._items: Sequence[_T] = []
else:
self._items = list(items)
self._default = default
if not isinstance(default, Unset):
idx = self._items.index(default)
self._idx: Optional[int] = idx
else:
self._idx = None
self._mode = mode
self.fuzzyval: Optional[int] = None
def __getitem__(self, key: int) -> _T: # type: ignore[override]
return self._items[key]
def __len__(self) -> int:
return len(self._items)
def __repr__(self) -> str:
return utils.get_repr(self, items=self._items, mode=self._mode,
idx=self._idx, fuzzyval=self.fuzzyval)
def _snap_in(self, offset: int) -> bool:
"""Set the current item to the closest item to self.fuzzyval.
Args:
offset: negative to get the next smaller item, positive for the
next bigger one.
Return:
True if the value snapped in (changed),
False when the value already was in the list.
"""
assert isinstance(self.fuzzyval, (int, float)), self.fuzzyval
op = operator.le if offset < 0 else operator.ge
items = [(idx, e) for (idx, e) in enumerate(self._items)
if op(e, self.fuzzyval)]
if items:
item = min(
items,
key=lambda tpl:
abs(self.fuzzyval - tpl[1])) # type: ignore[operator]
else:
sorted_items = sorted(enumerate(self.items), key=lambda e: e[1])
idx = 0 if offset < 0 else -1
item = sorted_items[idx]
self._idx = item[0]
return self.fuzzyval not in self._items
def _get_new_item(self, offset: int) -> _T:
"""Logic for getitem to get the item at offset.
Args:
offset: The offset of the current item, relative to the last one.
Return:
The new item.
"""
assert self._idx is not None
try:
if self._idx + offset >= 0:
new = self._items[self._idx + offset]
else:
raise IndexError
except IndexError:
if self._mode == self.Modes.edge:
assert offset != 0
if offset > 0:
new = self.lastitem()
else:
new = self.firstitem()
elif self._mode == self.Modes.exception: # pragma: no branch
raise
else:
self._idx += offset
return new
@property
def items(self) -> Sequence[_T]:
"""Getter for items, which should not be set."""
return self._items
def getitem(self, offset: int) -> _T:
"""Get the item with a relative position.
Args:
offset: The offset of the current item, relative to the last one.
Return:
The new item.
"""
log.misc.debug("{} items, idx {}, offset {}".format(
len(self._items), self._idx, offset))
if not self._items:
raise IndexError("No items found!")
if self.fuzzyval is not None:
# Value has been set to something not in the list, so we snap in to
# the closest value in the right direction and count this as one
# step towards offset.
snapped = self._snap_in(offset)
if snapped and offset > 0:
offset -= 1
elif snapped:
offset += 1
self.fuzzyval = None
return self._get_new_item(offset)
def curitem(self) -> _T:
"""Get the current item in the list."""
if self._idx is not None:
return self._items[self._idx]
else:
raise IndexError("No current item!")
def nextitem(self) -> _T:
"""Get the next item in the list."""
return self.getitem(1)
def previtem(self) -> _T:
"""Get the previous item in the list."""
return self.getitem(-1)
def firstitem(self) -> _T:
"""Get the first item in the list."""
if not self._items:
raise IndexError("No items found!")
self._idx = 0
return self.curitem()
def lastitem(self) -> _T:
"""Get the last item in the list."""
if not self._items:
raise IndexError("No items found!")
self._idx = len(self._items) - 1
return self.curitem()
def reset(self) -> _T:
"""Reset the position to the default."""
if self._default is UNSET:
raise ValueError("No default set!")
        self._idx = self._items.index(self._default)
return self.curitem()
class PromptMode(enum.Enum):
"""The mode of a Question."""
yesno = enum.auto()
text = enum.auto()
user_pwd = enum.auto()
alert = enum.auto()
download = enum.auto()
class ClickTarget(enum.Enum):
"""How to open a clicked link."""
normal = enum.auto() #: Open the link in the current tab
tab = enum.auto() #: Open the link in a new foreground tab
    tab_bg = enum.auto()  #: Open the link in a new background tab
window = enum.auto() #: Open the link in a new window
hover = enum.auto() #: Only hover over the link
class KeyMode(enum.Enum):
"""Key input modes."""
normal = enum.auto() #: Normal mode (no mode was entered)
hint = enum.auto() #: Hint mode (showing labels for links)
command = enum.auto() #: Command mode (after pressing the colon key)
yesno = enum.auto() #: Yes/No prompts
prompt = enum.auto() #: Text prompts
insert = enum.auto() #: Insert mode
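# --- Illustrative usage of NeighborList (a sketch, not part of the original file) ---
# nl = NeighborList([20, 50, 80], default=50)
# nl.nextitem()     # -> 80
# nl.fuzzyval = 75  # a value not in the list: the next call first snaps to
# nl.previtem()     # the closest smaller item -> 50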
|
timrae/anki
|
aqt/main.py
|
Python
|
agpl-3.0
| 39,738
| 0.000906
|
# Copyright: Damien Elmes <anki@ichi2.net>
# -*- coding: utf-8 -*-
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import re
import signal
import zipfile
from send2trash import send2trash
from aqt.qt import *
from anki import Collection
from anki.utils import isWin, isMac, intTime, splitFields, ids2str
from anki.hooks import runHook, addHook
import aqt
import aqt.progress
import aqt.webview
import aqt.toolbar
import aqt.stats
from aqt.utils import restoreGeom, showInfo, showWarning,\
restoreState, getOnlyText, askUser, applyStyles, showText, tooltip, \
openHelp, openLink, checkInvalidFilename
import anki.db
class AnkiQt(QMainWindow):
def __init__(self, app, profileManager, args):
QMainWindow.__init__(self)
self.state = "startup"
aqt.mw = self
self.app = app
if isWin:
self._xpstyle = QStyleFactory.create("WindowsXP")
self.app.setStyle(self._xpstyle)
self.pm = profileManager
# running 2.0 for the first time?
if self.pm.meta['firstRun']:
# load the new deck user profile
self.pm.load(self.pm.profiles()[0])
# upgrade if necessary
from aqt.upgrade import Upgrader
u = Upgrader(self)
u.maybeUpgrade()
self.pm.meta['firstRun'] = False
self.pm.save()
# init rest of app
if qtmajor == 4 and qtminor < 8:
# can't get modifiers immediately on qt4.7, so no safe mode there
self.safeMode = False
else:
self.safeMode = self.app.queryKeyboardModifiers() & Qt.ShiftModifier
try:
self.setupUI()
self.setupAddons()
except:
showInfo(_("Error during startup:\n%s") % traceback.format_exc())
sys.exit(1)
# must call this after ui set up
if self.safeMode:
tooltip(_("Shift key was held down. Skipping automatic "
"syncing and add-on loading."))
# were we given a file to import?
if args and args[0]:
self.onAppMsg(unicode(args[0], sys.getfilesystemencoding(), "ignore"))
# Load profile in a timer so we can let the window finish init and not
# close on profile load error.
if isMac and qtmajor >= 5:
self.show()
self.progress.timer(10, self.setupProfile, False)
def setupUI(self):
self.col = None
self.hideSchemaMsg = False
self.setupAppMsg()
self.setupKeys()
self.setupThreads()
self.setupFonts()
self.setupMainWindow()
self.setupSystemSpecific()
self.setupStyle()
self.setupMenus()
self.setupProgress()
self.setupErrorHandler()
self.setupSignals()
self.setupAutoUpdate()
self.setupHooks()
self.setupRefreshTimer()
self.updateTitleBar()
# screens
self.setupDeckBrowser()
self.setupOverview()
self.setupReviewer()
# Profiles
##########################################################################
def setupProfile(self):
self.pendingImport = None
# profile not provided on command line?
if not self.pm.name:
# if there's a single profile, load it automatically
profs = self.pm.profiles()
if len(profs) == 1:
try:
self.pm.load(profs[0])
except:
# password protected
pass
if not self.pm.name:
self.showProfileManager()
else:
self.loadProfile()
def showProfileManager(self):
self.state = "profileManager"
d = self.profileDiag = QDialog()
f = self.profileForm = aqt.forms.profiles.Ui_Dialog()
f.setupUi(d)
d.connect(f.login, SIGNAL("clicked()"), self.onOpenProfile)
d.connect(f.profiles, SIGNAL("itemDoubleClicked(QListWidgetItem*)"),
self.onOpenProfile)
        d.connect(f.quit, SIGNAL("clicked()"), lambda: sys.exit(0))
d.connect(f.add, SIGNAL("clicked()"), self.onAddProfile)
        d.connect(f.rename, SIGNAL("clicked()"), self.onRenameProfile)
d.connect(f.delete_2, SIGNAL("clicked()"), self.onRemProfile)
d.connect(d, SIGNAL("rejected()"), lambda: d.close())
d.connect(f.profiles, SIGNAL("currentRowChanged(int)"),
self.onProfileRowChange)
self.refreshProfilesList()
# raise first, for osx testing
d.show()
d.activateWindow()
d.raise_()
d.exec_()
def refreshProfilesList(self):
f = self.profileForm
f.profiles.clear()
profs = self.pm.profiles()
f.profiles.addItems(profs)
try:
idx = profs.index(self.pm.name)
except:
idx = 0
f.profiles.setCurrentRow(idx)
def onProfileRowChange(self, n):
if n < 0:
# called on .clear()
return
name = self.pm.profiles()[n]
f = self.profileForm
passwd = not self.pm.load(name)
f.passEdit.setVisible(passwd)
f.passLabel.setVisible(passwd)
def openProfile(self):
name = self.pm.profiles()[self.profileForm.profiles.currentRow()]
passwd = self.profileForm.passEdit.text()
return self.pm.load(name, passwd)
def onOpenProfile(self):
if not self.openProfile():
showWarning(_("Invalid password."))
return
self.profileDiag.close()
self.loadProfile()
return True
def profileNameOk(self, str):
return not checkInvalidFilename(str)
def onAddProfile(self):
name = getOnlyText(_("Name:"))
if name:
name = name.strip()
if name in self.pm.profiles():
return showWarning(_("Name exists."))
if not self.profileNameOk(name):
return
self.pm.create(name)
self.pm.name = name
self.refreshProfilesList()
def onRenameProfile(self):
name = getOnlyText(_("New name:"), default=self.pm.name)
if not self.openProfile():
return showWarning(_("Invalid password."))
if not name:
return
if name == self.pm.name:
return
if name in self.pm.profiles():
return showWarning(_("Name exists."))
if not self.profileNameOk(name):
return
self.pm.rename(name)
self.refreshProfilesList()
def onRemProfile(self):
profs = self.pm.profiles()
if len(profs) < 2:
return showWarning(_("There must be at least one profile."))
# password correct?
if not self.openProfile():
return
# sure?
if not askUser(_("""\
All cards, notes, and media for this profile will be deleted. \
Are you sure?""")):
return
self.pm.remove(self.pm.name)
self.refreshProfilesList()
def loadProfile(self):
# show main window
if self.pm.profile['mainWindowState']:
restoreGeom(self, "mainWindow")
restoreState(self, "mainWindow")
else:
self.resize(500, 400)
# toolbar needs to be retranslated
self.toolbar.draw()
# titlebar
self.setWindowTitle("Anki - " + self.pm.name)
# show and raise window for osx
self.show()
self.activateWindow()
self.raise_()
# maybe sync (will load DB)
if self.pendingImport and os.path.basename(
self.pendingImport).startswith("backup-"):
# skip sync when importing a backup
self.loadCollection()
else:
self.onSync(auto=True)
# import pending?
if self.pendingImport:
if self.pm.profile['key']:
showInfo(_("""\
To import into a password protected profile, please open the profile before attempting to import."""))
else:
self.handleImport(self.pendingImport)
self
|
gslab-econ/gslab_python
|
gslab_make/dir_mod.py
|
Python
|
mit
| 9,540
| 0.005346
|
#! /usr/bin/env python
import os
import time
import traceback
import re
import locale
import subprocess
import zlib
import zipfile
import private.metadata as metadata
import private.messages as messages
from glob import glob
from private.exceptionclasses import CustomError, CritError, SyntaxError, LogicError
from private.preliminaries import print_error
#== Directory modification functions =================
def delete_files(pathname):
"""Delete files specified by a path
This function deletes a possibly-empty list of files whose names match
`pathname`, which must be a string containing a path specification.
`pathname` can be either absolute (like /usr/src/Python-1.5/Makefile)
or relative (like ../../Tools/*/*.gif). It can contain shell-style wildcards.
"""
print "\nDelete files", pathname
for f in glob(pathname):
os.remove(f)
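# Usage sketch (hypothetical path): remove every .tmp file under ../output
#     delete_files('../output/*.tmp')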
def remove_dir(pathname, options = '@DEFAULTVALUE@'):
"""Remove a directory
This function completely removes the directory specified by `pathname`
using the 'rmdir' command on Windows platforms and the 'rm' command
on Unix platforms. This is useful for removing symlinks without
deleting the source files or directory.
"""
if os.name == 'posix':
os_command = 'rmdirunix'
if pathname[-1] == '/':
pathname = pathname[0:-1]
else:
os_command = 'rmdirwin'
command = metadata.commands[os_command]
if options == '@DEFAULTVALUE@':
options = metadata.default_options[os_command]
subprocess.check_call(command % (options, pathname), shell=True)
def check_manifest(manifestlog = '@DEFAULTVALUE@',
output_dir = '@DEFAULTVALUE@',
makelog = '@DEFAULTVALUE@'):
"""
    Produce an error if any .dta file in "output_dir" or its non-hidden
    sub-directories is missing from the manifest file "manifestlog", and
    produce a warning (with a list of the files) if any .txt or .csv files
    are missing from the manifest. All output is written to the "makelog" log file.
"""
# metadata.settings should not be part of argument defaults so that they can be
# overwritten by make_log.set_option
if manifestlog == '@DEFAULTVALUE@':
manifestlog = metadata.settings['manifest_file']
if output_dir == '@DEFAULTVALUE@':
output_dir = metadata.settings['output_dir']
if makelog == '@DEFAULTVALUE@':
makelog = metadata.settings['makelog_file']
print "\nCheck manifest log file", manifestlog
# Open main log file
try:
LOGFILE = open(makelog, 'ab')
except Exception as errmsg:
print errmsg
raise CritError(messages.crit_error_log % makelog)
try:
# Open manifest log file
try:
MANIFESTFILE = open(manifestlog, 'rU')
except Exception as errmsg:
print errmsg
raise CritError(messages.crit_error_log % manifestlog)
manifest_lines = MANIFESTFILE.readlines()
MANIFESTFILE.close()
# Get file list
try:
            file_list = []
for i in range(len(manifest_lines)):
if manifest_lines[i].startswith('File: '):
filepath = os.path.abspath(manifest_lines[i][6:].rstrip())
ext = os.path.splitext(filepath)[1]
if ext == '':
filepath = filepath + '.dta'
file_list.append( filepath )
except Exception as errmsg:
print errmsg
raise SyntaxError(messages.syn_error_manifest % manifestlog)
if not os.path.isdir(output_dir):
raise CritError(messages.crit_error_no_directory % (output_dir))
# Loop over all levels of sub-directories of output_dir
for root, dirs, files in os.walk(output_dir, topdown = True):
# Ignore non-hidden sub-directories
dirs_to_keep = []
for dirname in dirs:
if not dirname.startswith('.'):
dirs_to_keep.append(dirname)
dirs[:] = dirs_to_keep
# Check each file
for filename in files:
ext = os.path.splitext(filename)[1]
fullpath = os.path.abspath( os.path.join(root, filename) )
# non-hidden .dta file: error
if (not filename.startswith('.')) and (ext == '.dta'):
print 'Checking: ', fullpath
if not (fullpath in file_list):
raise CritError(messages.crit_error_no_dta_file % (filename, manifestlog))
                # non-hidden .csv file: warning
if (not filename.startswith('.')) and (ext == '.csv'):
print 'Checking: ', fullpath
if not (fullpath in file_list):
print messages.note_no_csv_file % (filename, manifestlog)
print >> LOGFILE, messages.note_no_csv_file % (filename, manifestlog)
# non-hidden .txt file: warning
                if (not filename.startswith('.')) and (ext == '.txt'):
print 'Checking: ', fullpath
if not (fullpath in file_list):
print messages.note_no_txt_file % (filename, manifestlog)
print >> LOGFILE, messages.note_no_txt_file % (filename, manifestlog)
except:
print_error(LOGFILE)
LOGFILE.close()
def list_directory(top, makelog = '@DEFAULTVALUE@'):
"""List directories
This function lists all non-hidden sub-directories of the directory
specified by `top`, a path, and their content from the top down.
It writes their names, modified times, and sizes in bytes to the
log file specified by the path `makelog`.
"""
# metadata.settings should not be part of argument defaults so that they can be
# overwritten by make_log.set_option
if makelog == '@DEFAULTVALUE@':
makelog = metadata.settings['makelog_file']
print "\nList all files in directory", top
# To print numbers (file sizes) with thousand separator
locale.setlocale(locale.LC_ALL, '')
makelog = re.sub('\\\\', '/', makelog)
try:
LOGFILE = open(makelog, 'ab')
except Exception as errmsg:
print errmsg
raise CritError(messages.crit_error_log % makelog)
print >> LOGFILE, '\n'
print >> LOGFILE, 'List of all files in sub-directories in', top
try:
if os.path.isdir(top):
for root, dirs, files in os.walk(top, topdown = True):
# Ignore non-hidden sub-directories
dirs_to_keep = []
for dirname in dirs:
if not dirname.startswith('.'):
dirs_to_keep.append(dirname)
dirs[:] = dirs_to_keep
# Print out the sub-directory and its time stamp
created = os.stat(root).st_mtime
asciiTime = time.asctime(time.localtime(created))
print >> LOGFILE, root
print >> LOGFILE, 'created/modified', asciiTime
# Print out all the files in the sub-directories
for name in files:
full_name = os.path.join(root, name)
created = os.path.getmtime(full_name)
size = os.path.getsize(full_name)
asciiTime = time.asctime(time.localtime(created))
print >> LOGFILE, '%50s' % name, '--- created/modified', asciiTime, \
'(', locale.format('%d', size, 1), 'bytes )'
except:
print_error(LOGFILE)
print >> LOGFILE, '\n'
LOGFILE.close()
def clear_dirs(*args):
"""Create fresh directories
This function creates a directory for each path specified in
*args if such a directory does not already exist. It deletes
all files in each direc
|
namlook/MongoLite
|
mongolite/schema_document.py
|
Python
|
bsd-3-clause
| 17,726
| 0.002369
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import logging
from copy import deepcopy
log = logging.getLogger(__name__)
from mongo_exceptions import StructureError, BadKeyError, AuthorizedTypeError
from helpers import DotCollapsedDict
# field wich does not need to be declared into the skeleton
STRUCTURE_KEYWORDS = []
class SchemaProperties(type):
def __new__(cls, name, bases, attrs):
attrs['_protected_field_names'] = set(
['_protected_field_names', '_namespaces', '_required_namespace'])
for base in bases:
parent = base.__mro__[0]
if hasattr(parent, 'skeleton'):
if parent.skeleton is not None:
if parent.skeleton:
if 'skeleton' not in attrs and parent.skeleton:
attrs['skeleton'] = parent.skeleton
else:
obj_skeleton = attrs.get('skeleton', {}).copy()
attrs['skeleton'] = parent.skeleton.copy()
attrs['skeleton'].update(obj_skeleton)
if hasattr(parent, 'optional'):
if parent.optional is not None:
if parent.optional:
if 'optional' not in attrs and parent.optional:
attrs['optional'] = parent.optional
else:
                            obj_optional = attrs.get('optional', {}).copy()
attrs['optional'] = parent.optional.copy()
attrs['optional'].update(obj_optional)
if hasattr(parent, 'default_values'):
if parent.default_values:
                    obj_default_values = attrs.get('default_values', {}).copy()
attrs['default_values'] = parent.default_values.copy()
attrs['default_values'].update(obj_default_values)
if hasattr(parent, 'skeleton') or hasattr(parent, 'optional'):
if attrs.get('authorized_types'):
attrs['authorized_types'] = list(set(parent.authorized_types).union(set(attrs['authorized_types'])))
for mro in bases[0].__mro__:
attrs['_protected_field_names'] = attrs['_protected_field_names'].union(list(mro.__dict__))
attrs['_protected_field_names'] = list(attrs['_protected_field_names'])
attrs['_namespaces'] = []
attrs['_collapsed_struct'] = {}
if (attrs.get('skeleton') or attrs.get('optional')) and name not in ["SchemaDocument", "Document"]:
base = bases[0]
if not attrs.get('authorized_types'):
attrs['authorized_types'] = base.authorized_types
if attrs.get('skeleton'):
base._validate_skeleton(attrs['skeleton'], name, attrs.get('authorized_types'))
attrs['_namespaces'].extend(list(base._SchemaDocument__walk_dict(attrs['skeleton'])))
attrs['_collapsed_struct'].update(DotCollapsedDict(attrs['skeleton'], remove_under_type=True))
if attrs.get('optional'):
base._validate_skeleton(attrs['optional'], name, attrs.get('authorized_types'))
attrs['_namespaces'].extend(list(base._SchemaDocument__walk_dict(attrs['optional'])))
attrs['_collapsed_struct'].update(DotCollapsedDict(attrs['optional'], remove_under_type=True))
cls._validate_descriptors(attrs)
if (attrs.get('skeleton') or attrs.get('optional')):
skel_doc = ""
for k, v in attrs.get('skeleton', {}).iteritems():
skel_doc += " "*8+k+" : "+str(v)+"\n"
opt_doc = ""
for k, v in attrs.get('optional', {}).iteritems():
opt_doc += " "*8+k+" : "+str(v)+"\n"
attrs['__doc__'] = attrs.get('__doc__', '')+"""
required fields: {
%s }
optional fields: {
%s }
""" % (skel_doc, opt_doc)
return type.__new__(cls, name, bases, attrs)
@classmethod
def _validate_descriptors(cls, attrs):
for dv in attrs.get('default_values', {}):
if not dv in attrs['_namespaces']:
raise ValueError("Error in default_values: can't find %s in skeleton" % dv )
class SchemaDocument(dict):
__metaclass__ = SchemaProperties
skeleton = None
optional = None
default_values = {}
authorized_types = [
type(None),
bool,
int,
long,
float,
unicode,
str,
basestring,
list,
dict,
datetime.datetime,
]
def __init__(self, doc=None, gen_skel=True, gen_auth_types=True):
"""
        doc : a dictionary
        gen_skel : if True, automatically generate the skeleton of the doc,
          filled with NoneType each time validate() is called. Note that
          if doc is not {}, gen_skel is always False. If gen_skel is False,
          default_values cannot be filled.
        gen_auth_types: if True, automatically generate the self.authorized_types
          attribute from self.authorized_types
"""
if self.skeleton is None:
self.skeleton = {}
# init
if doc:
for k, v in doc.iteritems():
self[k] = v
gen_skel = False
if gen_skel:
self.generate_skeleton()
if self.default_values:
if self.skeleton:
self._set_default_fields(self, self.skeleton)
if self.optional:
self._set_default_fields(self, self.optional)
def generate_skeleton(self):
"""
validate and generate the skeleton of the document
from the skeleton (unknown values are set to None)
"""
if self.skeleton:
self.__generate_skeleton(self, self.skeleton)
if self.optional:
self.__generate_skeleton(self, self.optional)
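    # Illustrative subclass (a sketch, not part of the original module; the
    # field names and types are made up):
    #     class BlogPost(SchemaDocument):
    #         skeleton = {'title': unicode, 'body': unicode}
    #         optional = {'tags': list}
    #         default_values = {'title': u'Untitled'}
    #     post = BlogPost()  # skeleton generated, default title filled in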
#
# Public API end
#
@classmethod
def __walk_dict(cls, dic):
# thanks jean_b for the patch
for key, value in dic.items():
if isinstance(value, dict) and len(value):
if type(key) is type:
yield '$%s' % key.__name__
else:
yield key
for child_key in cls.__walk_dict(value):
if type(key) is type:
new_key = "$%s" % key.__name__
else:
new_key = key
#if type(child_key) is type:
#
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/conversion_value_rule_set_service/transports/__init__.py
|
Python
|
apache-2.0
| 1,117
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ConversionValueRuleSetServiceTransport
from .grpc import ConversionValueRuleSetServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConversionValueRuleSetServiceTransport]]
_transport_registry["grpc"] = ConversionValueRuleSetServiceGrpcTransport
__all__ = (
"ConversionValueRuleSetServiceTransport",
"ConversionValueRuleSetServiceGrpcTransport",
)
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/sqlserver/v20180328/models.py
|
Python
|
mit
| 284,748
| 0.002484
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AccountCreateInfo(AbstractModel):
"""账号创建信息
"""
def __init__(self):
r"""
:param UserName: 实例用户名
:type UserName: str
:param Password: 实例密码
:type Password: str
:param DBPrivileges: DB权限列表
:type DBPrivileges: list of DBPrivilege
:param Remark: 账号备注信息
:type Remark: str
:param IsAdmin: 是否为管理员账户,默认为否
:type IsAdmin: bool
"""
self.UserName = None
self.Password = None
self.DBPrivileges = None
self.Remark = None
self.IsAdmin = None
def _deserialize(self, params):
self.UserName = params.get("UserName")
self.Password = params.get("Password")
if params.get("DBPrivileges") is not None:
self.DBPrivileges = []
for item in params.get("DBPrivileges"):
obj = DBPrivilege()
obj._deserialize(item)
self.DBPrivileges.append(obj)
self.Remark = params.get("Remark")
self.IsAdmin = params.get("IsAdmin")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AccountDetail(AbstractModel):
"""账户信息详情
"""
def __init__(self):
r"""
:param Name: 账户名
:type Name: str
:param Remark: 账户备注
:type Remark: str
:param CreateTime: 账户创建时间
:type CreateTime: str
:param Status: 账户状态,1-创建中,2-正常,3-修改中,4-密码重置中,-1-删除中
:type Status: int
:param UpdateTime: 账户更新时间
:type UpdateTime: str
:param PassTime: 密码更新时间
:type PassTime: str
:param InternalStatus: 账户内部状态,正常为enable
:type InternalStatus: str
:param Dbs: 该账户对相关db的读写权限信息
:type Dbs: list of DBPrivilege
:param IsAdmin: 是否为管理员账户
:type IsAdmin: bool
"""
self.Name = None
self.Remark = None
self.CreateTime = None
self.Status = None
self.UpdateTime = None
self.PassTime = None
self.InternalStatus = None
self.Dbs = None
self.IsAdmin = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Remark = params.get("Remark")
self.CreateTime = params.get("CreateTime")
self.Status = params.get("Status")
self.UpdateTime = params.get("UpdateTime")
self.PassTime = params.get("PassTime")
self.InternalStatus = params.get("InternalStatus")
if params.get("Dbs") is not None:
self.Dbs = []
for item in params.get("Dbs"):
obj = DBPrivilege()
obj._deserialize(item)
self.Dbs.append(obj)
self.IsAdmin = params.get("IsAdmin")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AccountPassword(AbstractModel):
"""实例账号密码信息
"""
def __init__(self):
r"""
:param UserName: 用户名
:type UserName: str
:param Password: 密码
:type Password: str
"""
self.UserName = None
self.Password = None
def _deserialize(self, params):
self.UserName = params.get("UserName")
self.Password = params.get("Password")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AccountPrivilege(AbstractModel):
"""数据库账号权限信息。创建数据库时设置
"""
def __init__(self):
r"""
:param UserName: 数据库用户名
:type UserName: str
:param Privilege: 数据库权限。ReadWrite表示可读写,ReadOnly表示只读
:type Privilege: str
"""
self.UserName = None
self.Privilege = None
def _deserialize(self, params):
self.UserName = params.get("UserName")
self.Privilege = params.get("Privilege")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AccountPrivilegeModifyInfo(AbstractModel):
"""数据库账号权限变更信息
"""
def __init__(self):
r"""
:param UserName: 数据库用户名
|
:type UserName: str
:param DBPrivileges: 账号权限变更信息
:type DBPrivileges: list of DBPrivilegeModifyInfo
"""
self.UserName = None
self.DBPrivileges = None
def _deserialize(self, params):
self.UserName = params.get("UserName")
if params.get("DBPrivileges") is not None:
self.DBPrivileges = []
for item in params.get("DBPrivileges"):
|
obj = DBPrivilegeModifyInfo()
obj._deserialize(item)
self.DBPrivileges.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AccountRemark(AbstractModel):
"""账户备注信息
"""
def __init__(self):
r"""
:param UserName: 账户名
:type UserName: str
:param Remark: 对应账户新的备注信息
:type Remark: str
"""
self.UserName = None
self.Remark = None
def _deserialize(self, params):
self.UserName = params.get("UserName")
self.Remark = params.get("Remark")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AssociateSecurityGroupsRequest(AbstractModel):
"""AssociateSecurityGroups请求参数结构体
"""
def __init__(self):
r"""
:param SecurityGroupId: 安全组ID。
:type SecurityGroupId: str
:param InstanceIdSet: 实例ID 列表,一个或者多个实例ID组成的数组。多个实例必须是同一个地域,同一个可用区,同一个项目下的。
:type InstanceIdSet: list of str
"""
self.SecurityGroupId = None
self.InstanceIdSet = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
self.InstanceIdSet = params.get("InstanceIdSet")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AssociateSecurityGroupsResponse(AbstractModel):
"""AssociateSecurityGroups返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type Reque
|
yrobla/pyjuegos
|
pgzero/music.py
|
Python
|
lgpl-3.0
| 2,526
| 0.000396
|
from pygame.mixer import music as _music
from .loaders import ResourceLoader
from . import constants
__all__ = [
'rewind', 'stop', 'fadeout', 'set_volume', 'get_volume', 'get_pos',
'set_pos', 'play', 'queue', 'pause', 'unpause',
]
_music.set_endevent(constants.MUSIC_END)
class _MusicLoader(ResourceLoader):
"""Pygame's music API acts as a singleton with one 'current' track.
No objects are returned that represent different tracks, so this loader
    can't return anything useful. But it can perform all the path name
validations and return the validated path, so that's what we do.
This loader should not be exposed to the user.
"""
EXTNS = ['mp3', 'ogg', 'oga']
TYPE = 'music'
def _load(self, path):
return path
_loader = _MusicLoader('music')
# State of whether we are paused or not
_paused = False
def _play(name, loop):
global _paused
path = _loader.load(name)
_music.load(path)
_music.play(loop)
_paused = False
def play(name):
"""Play a music file from the music/ directory.
The music will loop when it finishes playing.
"""
_play(name, -1)
def play_once(name):
"""Play a music file from the music/ directory."""
_play(name, 0)
def queue(name):
"""Queue a music file to follow the current track.
This will load a music file and queue it. A queued music file will begin as
soon as the current music naturally ends. If the current music is ever
stopped or changed, the queued song will be lost.
"""
path = _loader.load(name)
_music.queue(path)
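# Usage sketch (hypothetical track names):
#     play_once('intro')   # play the intro a single time
#     queue('main-theme')  # starts only if 'intro' ends naturally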
def is_playing(name):
"""Return True if the music is playing and not paused."""
return _music.get_busy() and not _paused
def pause():
"""Temporarily stop playback of the music stream.
Call `unpause()` to resume.
"""
global _paused
_music.pause()
_paused = True
def unpause():
"""Resume playback of the music stream after it has been paused."""
global _paused
_music.unpause()
_paused = False
def fadeout(seconds):
"""Fade out and eventually stop the music playback.
:param seconds: The duration in seconds over which the sound will be faded
out. For example, to fade out over half a second, call
``music.fadeout(0.5)``.
"""
_music.fadeout(int(seconds * 1000))
rewind = _music.rewind
stop = _music.stop
get_volume = _music.get_volume
set_volume = _music.set_volume
get_pos = _music.get_pos
set_pos = _music.set_pos
|
sethlaw/sputr
|
tests/xss_test.py
|
Python
|
gpl-3.0
| 3,768
| 0.005308
|
from .requests_test import RequestsTest
import copy
import sys
class XSSTest(RequestsTest):
def test(self):
if self.DEBUG: print("Run the XSS Tests")
passed = 0
failed = 0
messages = []
url = self.domain['protocol'] + self.domain['host'] + self.config['path']
print("XSS Test for " + url)
for k, v in self.config['params'].items():
result_text = []
result = 'PASS'
for p in self.payloads:
                if self.DEBUG: print(url + "?" + k + "=" + v + " (" + p + ")")
if self.config['method'] == 'GET':
if self.DEBUG: print("Using GET " + self.config['path'])
data = copy.deepcopy(self.config['params'])
data[k] = data[k] + p
res = self.get(url, params=data)
if res.status_code != 200:
                        result_text.append('Payload ' + p + ' caused an unknown error for parameter ' + k)
failed = failed + 1
result = 'ERROR'
else:
if 'testpath' in self.config:
res = self.get(self.domain['protocol'] + self.domain['host'] + self.config['testpath'])
if self.DEBUG: print("Status " + str(res.status_code))
# if self.DEBUG: print("Content " + str(res.text))
if p in res.text:
failed = failed + 1
result_text.append('=> Payload ' + p + ' not filtered for parameter ' + k)
sys.stderr.write('=> Payload ' + p + ' not filtered for parameter ' + k + '\n')
result = 'FAIL'
else:
passed = passed + 1
elif self.config['method'] == 'POST':
data = copy.deepcopy(self.config['params'])
data[k] = data[k] + p
if self.DEBUG: print("Using POST " + self.config['path'] + " data: " + str(data))
res1 = self.get(url) # Get in case we need CSRF tokens and/or other items from the form
res = self.post(url, data=data)
if res.status_code != 200:
result_text.append('Payload ' + p + ' caused an unknown error for parameter ' + k)
result = 'ERROR'
failed = failed + 1
elif res.status_code >= 300 and res.status_code <= 400:
print("Status Code: " + str(res.status_code))
else:
if 'testpath' in self.config:
res = self.get(self.domain['protocol'] + self.domain['host'] + self.config['testpath'])
if self.DEBUG: print("Status " + str(res.status_code))
# if self.DEBUG: print("Content " + str(res.text))
if p in res.text:
failed = failed + 1
result = 'FAIL'
result_text.append('=> Payload ' + p + ' not filtered for parameter ' + k)
sys.stderr.write('=> Payload ' + p + ' not filtered for parameter ' + k + '\n')
else:
passed = passed + 1
else:
if self.DEBUG: print("Endpoint method is not GET or POST")
self.report.add_test_result(url, self.config['method'], 'xss', k, result, result_text)
print("=> " + str(passed) + "/" + str(passed + failed) + " passed/total")
# print("Messages: " + str(messages))
|
emilssolmanis/tapes
|
tapes/local/timer.py
|
Python
|
apache-2.0
| 702
| 0
|
import contextlib
from time import time
from .meter import Meter
from .stats import Stat
from .histogram import Histogram
class Timer(Stat):
def __init__(self):
self.count = 0
self.meter = Meter()
self.histogram = Histogram()
super(Timer, self).__init__()
@contextlib.contextmanager
def time(self):
start_time = time()
try:
yield
finally:
self.update(time() - start_time)
def update(self, value):
self.meter.mark()
self.histogram.update(value)
def get_values(self):
values = self.meter.get_values()
values.update(self.histogram.get_values())
return values
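# Usage sketch (illustrative; do_work is a hypothetical callable):
#     timer = Timer()
#     with timer.time():
#         do_work()               # the elapsed time is recorded on exit
#     stats = timer.get_values()  # merged meter + histogram statistics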
|
dsumike/adventofcode
|
python/11p2.py
|
Python
|
mit
| 1,491
| 0.032193
|
#!/usr/bin/env python
import re
letters = 'abcdefghijklmnopqrstuvwxyz'
with open("../input/11.txt") as fileobj:
password = fileobj.readline().strip()
print password
def rules(password):
rules = [ rule1, rule2, rule3 ]
if all(rule(password) for rule in rules):
return True
else:
return False
def rule1(password):
# Rule 1: in the range of A-Z, must have 3 consecutive letters
    # Check A-X for [abc, bcd, ..., wxy, xyz]
for i in range(24):
if letters[i:i+3] in password:
return True
# else rule 1 failed
return False
def rule2(password):
# Rule 2: No i, o, l
if 'i' in password or 'o' in password or 'l' in password:
return False
return True
def rule3(password):
# Rule 3: Password must contain at least 2 different, non-overlapping pairs of letters
# (aa, bb) or even (aa, aa) "aaaa"
pair = 0
skipnext = False
for i in range(len(password) - 1):
if skipnext:
skipnext = False
continue
if password[i] == password[i + 1]:
pair += 1
skipnext = True
    return pair > 1
def increment(password):
if password[-1] == 'z':
return increment(password[0:-1]) + 'a'
return password[0:-1] + letters[letters.index(password[-1]) + 1]
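# Worked examples (derived from the rules above):
#     increment('abc')  # -> 'abd': bump the final letter
#     increment('azz')  # -> 'baa': trailing 'z's wrap to 'a' and carry left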
while True:
if rules(password):
print "Success! -- 1st password:", password
break
else:
password = increment(password)
# Next run
password = increment(password)
while True:
if rules(password):
print "Success! -- 2nd password:", password
break
else:
password = increment(password)
|
ismail-s/warehouse
|
tests/unit/i18n/test_init.py
|
Python
|
apache-2.0
| 1,632
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse import i18n
def test_sets_locale(monkeypatch):
locale_obj = pretend.stub()
locale_cls = pretend.stub(
parse=pretend.call_recorder(lambda l: locale_obj),
)
monkeypatch.setattr(i18n, "Locale", locale_cls)
request = pretend.stub(locale_name=pretend.stub())
assert i18n._locale(request) is locale_obj
assert locale_cls.parse.calls == [pretend.call(request.locale_name)]
def test_includeme():
config_settings = {}
config = pretend.stub(
add_request_method=pretend.call_recorder(lambda f, name, reify: None),
get_settings=lambda: config_settings,
)
i18n.includeme(config)
assert config.add_request_method.calls == [
pretend.call(i18n._locale, name="locale", reify=True),
]
assert config_settings == {
"jinja2.filters": {
"format_date": "warehouse.i18n.filters:format_date",
"format_datetime": "warehouse.i18
|
n.filters:format_datetime",
},
"jinja2.globals": {
"l20n": "warehouse.i18n.l20n:l20n",
},
}
|
developerworks/horizon
|
horizon/tests/authors_tests.py
|
Python
|
apache-2.0
| 2,188
| 0.001371
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2012 Nebula Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import commands
import unittest
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
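# Sketch: a .mailmap line 'alice@example.com a.smith@example.com'
# (hypothetical addresses) yields the mapping
#     {'a.smith@example.com': 'alice@example.com'}   # alias -> canonical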
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
class AuthorsTestCase(unittest.TestCase):
def test_authors_up_to_date(self):
path_bits = (os.path.dirname(__file__), '..', '..')
root = os.path.normpath(os.path.join(*path_bits))
contributors = set()
missing = set()
authors_file = open(os.path.join(root, 'AUTHORS'), 'r').read()
if os.path.exists(os.path.join(root, '.git')):
mailmap = parse_mailmap(os.path.join(root, '.mailmap'))
for email in commands.getoutput('git log --format=%ae').split():
if not email:
continue
if "jenkins" in email and "openstack.org" in email:
continue
email = '<' + email + '>'
contributors.add(str_dict_replace(email, mailmap))
for contributor in contributors:
if not contributor in authors_file:
missing.add(contributor)
self.assertTrue(len(missing) == 0,
'%r not listed in AUTHORS file.' % missing)
|
stephenfin/patchwork
|
patchwork/api/event.py
|
Python
|
gpl-2.0
| 3,356
| 0
|
# Patchwork - automated patch tracking system
# Copyright (C) 2017 Stephen Finucane <stephen@that.guru>
#
# SPDX-License-Identifier: GPL-2.0-or-later
from collections import OrderedDict
from rest_framework.generics import ListAPIView
from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import SerializerMethodField
from patchwork.api.embedded import CheckSerializer
from patchwork.api.embedded import CoverLetterSerializer
from patchwork.api.embedded import PatchSerializer
from patchwork.api.embedded import ProjectSerializer
from patchwork.api.embedded import SeriesSerializer
from patchwork.api.embedded import UserSerializer
from patchwork.api.filters import EventFilterSet
from patchwork.api.patch import StateField
from patchwork.models import Event
class EventSerializer(ModelSerializer):
project = ProjectSerializer(read_only=True)
patch = PatchSerializer(read_only=True)
series = SeriesSerializer(read_only=True)
cover = CoverLetterSerializer(read_only=True)
previous_state = StateField()
current_state = StateField()
previous_delegate = UserSerializer()
current_delegate = UserSerializer()
    created_check = CheckSerializer()
_category_map = {
Event.CATEGORY_COVER_CREATED: ['cover'],
Event.CATEGORY_PATCH_CREATED: ['patch'],
Event.CATEGORY_PATCH_COMPLETED: ['patch', 'series'],
Event.CATEGORY_PATCH_STATE_CHANGED: ['patch', 'previous_state',
'current_state'],
Event.CATEGORY_PATCH_DELEGATED: ['patch', 'previous_delegate',
'current_delegate'],
Event.CATEGORY_CHECK_CREATED: ['patch', 'created_check'],
Event.CATEGORY_SERIES_CREATED: ['series'],
Event.CATEGORY_SERIES_COMPLETED: ['series'],
}
def to_representation(self, instance):
data = super(EventSerializer, self).to_representation(instance)
payload = OrderedDict()
kept_fields = self._category_map[instance.category] + [
            'id', 'category', 'project', 'date']
for field in [x for x in data]:
if field not in kept_fields:
del data[field]
elif field in self._category_map[instance.category]:
field_name = 'check' if field == 'created_check' else field
payload[field_name] = data.pop(field)
data['payload'] = payload
return data
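    # Sketch of the result for a patch state-change event (values elided;
    # the exact category strings come from the Event model):
    #     {'id': ..., 'category': ..., 'project': ..., 'date': ...,
    #      'payload': {'patch': ..., 'previous_state': ...,
    #                  'current_state': ...}}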
class Meta:
model = Event
fields = ('id', 'category', 'project', 'date', 'patch', 'series',
'cover', 'previous_state', 'current_state',
'previous_delegate', 'current_delegate', 'created_check')
read_only_fields = fields
class EventList(ListAPIView):
"""List events."""
serializer_class = EventSerializer
filter_class = filterset_class = EventFilterSet
page_size_query_param = None # fixed page size
ordering_fields = ()
ordering = '-date'
def get_queryset(self):
return Event.objects.all()\
.prefetch_related('project', 'patch', 'series', 'cover',
'previous_state', 'current_state',
'previous_delegate', 'current_delegate',
'created_check')
|
ljwolf/pysal
|
pysal/contrib/gwr/tests/test_gwr.py
|
Python
|
bsd-3-clause
| 45,280
| 0.009408
|
"""
GWR is tested against results from GWR4
"""
import unittest
import pickle as pk
from pysal.contrib.gwr.gwr import GWR
from pysal.contrib.gwr.sel_bw import Sel_BW
from pysal.contrib.gwr.diagnostics import get_AICc, get_AIC, get_BIC, get_CV
from pysal.contrib.glm.family import Gaussian, Poisson, Binomial
import numpy as np
import pysal
class TestGWRGaussian(unittest.TestCase):
def setUp(self):
data = pysal.open(pysal.examples.get_path('GData_utm.csv'))
self.coords = zip(data.by_col('X'), data.by_col('Y'))
self.y = np.array(data.by_col('PctBach')).reshape((-1,1))
rural = np.array(data.by_col('PctRural')).reshape((-1,1))
pov = np.array(data.by_col('PctPov')).reshape((-1,1))
black = np.array(data.by_col('PctBlack')).reshape((-1,1))
        self.X = np.hstack([rural, pov, black])
self.BS_F = pysal.open(pysal.examples.get_path('georgia_BS_F_listwise.csv'))
self.BS_NN = pysal.open(pysal.examples.get_path('georgia_BS_NN_listwise.csv'))
        self.GS_F = pysal.open(pysal.examples.get_path('georgia_GS_F_listwise.csv'))
self.GS_NN = pysal.open(pysal.examples.get_path('georgia_GS_NN_listwise.csv'))
self.MGWR = pk.load(open(pysal.examples.get_path('FB.p'), 'r'))
self.XB = pk.load(open(pysal.examples.get_path('XB.p'), 'r'))
self.err = pk.load(open(pysal.examples.get_path('err.p'), 'r'))
def test_BS_F(self):
est_Int = self.BS_F.by_col(' est_Intercept')
se_Int = self.BS_F.by_col(' se_Intercept')
t_Int = self.BS_F.by_col(' t_Intercept')
est_rural = self.BS_F.by_col(' est_PctRural')
se_rural = self.BS_F.by_col(' se_PctRural')
t_rural = self.BS_F.by_col(' t_PctRural')
est_pov = self.BS_F.by_col(' est_PctPov')
se_pov = self.BS_F.by_col(' se_PctPov')
t_pov = self.BS_F.by_col(' t_PctPov')
est_black = self.BS_F.by_col(' est_PctBlack')
se_black = self.BS_F.by_col(' se_PctBlack')
t_black = self.BS_F.by_col(' t_PctBlack')
yhat = self.BS_F.by_col(' yhat')
res = np.array(self.BS_F.by_col(' residual'))
std_res = np.array(self.BS_F.by_col(' std_residual')).reshape((-1,1))
localR2 = np.array(self.BS_F.by_col(' localR2')).reshape((-1,1))
inf = np.array(self.BS_F.by_col(' influence')).reshape((-1,1))
cooksD = np.array(self.BS_F.by_col(' CooksD')).reshape((-1,1))
model = GWR(self.coords, self.y, self.X, bw=209267.689, fixed=True)
rslt = model.fit()
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
self.assertAlmostEquals(np.floor(AICc), 894.0)
self.assertAlmostEquals(np.floor(AIC), 890.0)
self.assertAlmostEquals(np.floor(BIC), 944.0)
self.assertAlmostEquals(np.round(CV,2), 18.25)
np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:,1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:,1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:,1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:,2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:,2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:,2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:,3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:,3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:,3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
def test_BS_NN(self):
est_Int = self.BS_NN.by_col(' est_Intercept')
se_Int = self.BS_NN.by_col(' se_Intercept')
t_Int = self.BS_NN.by_col(' t_Intercept')
est_rural = self.BS_NN.by_col(' est_PctRural')
se_rural = self.BS_NN.by_col(' se_PctRural')
t_rural = self.BS_NN.by_col(' t_PctRural')
est_pov = self.BS_NN.by_col(' est_PctPov')
se_pov = self.BS_NN.by_col(' se_PctPov')
t_pov = self.BS_NN.by_col(' t_PctPov')
est_black = self.BS_NN.by_col(' est_PctBlack')
se_black = self.BS_NN.by_col(' se_PctBlack')
t_black = self.BS_NN.by_col(' t_PctBlack')
yhat = self.BS_NN.by_col(' yhat')
res = np.array(self.BS_NN.by_col(' residual'))
std_res = np.array(self.BS_NN.by_col(' std_residual')).reshape((-1,1))
localR2 = np.array(self.BS_NN.by_col(' localR2')).reshape((-1,1))
inf = np.array(self.BS_NN.by_col(' influence')).reshape((-1,1))
cooksD = np.array(self.BS_NN.by_col(' CooksD')).reshape((-1,1))
model = GWR(self.coords, self.y, self.X, bw=90.000, fixed=False)
rslt = model.fit()
AICc = get_AICc(rslt)
AIC = get_AIC(rslt)
BIC = get_BIC(rslt)
CV = get_CV(rslt)
self.assertAlmostEquals(np.floor(AICc), 896.0)
self.assertAlmostEquals(np.floor(AIC), 892.0)
self.assertAlmostEquals(np.floor(BIC), 941.0)
self.assertAlmostEquals(np.around(CV, 2), 19.19)
np.testing.assert_allclose(est_Int, rslt.params[:,0], rtol=1e-04)
np.testing.assert_allclose(se_Int, rslt.bse[:,0], rtol=1e-04)
np.testing.assert_allclose(t_Int, rslt.tvalues[:,0], rtol=1e-04)
np.testing.assert_allclose(est_rural, rslt.params[:,1], rtol=1e-04)
np.testing.assert_allclose(se_rural, rslt.bse[:,1], rtol=1e-04)
np.testing.assert_allclose(t_rural, rslt.tvalues[:,1], rtol=1e-04)
np.testing.assert_allclose(est_pov, rslt.params[:,2], rtol=1e-04)
np.testing.assert_allclose(se_pov, rslt.bse[:,2], rtol=1e-04)
np.testing.assert_allclose(t_pov, rslt.tvalues[:,2], rtol=1e-04)
np.testing.assert_allclose(est_black, rslt.params[:,3], rtol=1e-02)
np.testing.assert_allclose(se_black, rslt.bse[:,3], rtol=1e-02)
np.testing.assert_allclose(t_black, rslt.tvalues[:,3], rtol=1e-02)
np.testing.assert_allclose(yhat, rslt.mu, rtol=1e-05)
np.testing.assert_allclose(res, rslt.resid_response, rtol=1e-04)
np.testing.assert_allclose(std_res, rslt.std_res, rtol=1e-04)
np.testing.assert_allclose(localR2, rslt.localR2, rtol=1e-05)
np.testing.assert_allclose(inf, rslt.influ, rtol=1e-04)
np.testing.assert_allclose(cooksD, rslt.cooksD, rtol=1e-00)
def test_GS_F(self):
est_Int = self.GS_F.by_col(' est_Intercept')
se_Int = self.GS_F.by_col(' se_Intercept')
t_Int = self.GS_F.by_col(' t_Intercept')
est_rural = self.GS_F.by_col(' est_PctRural')
se_rural = self.GS_F.by_col(' se_PctRural')
t_rural = self.GS_F.by_col(' t_PctRural')
est_pov = self.GS_F.by_col(' est_PctPov')
se_pov = self.GS_F.by_col(' se_PctPov')
t_pov = self.GS_F.by_col(' t_PctPov')
est_black = self.GS_F.by_col(' est_PctBlack')
se_black = self.GS_F.by_col(' se_PctBlack')
t_black = self.GS_F.by_col(' t_PctBlack')
yhat = self.GS_F.by_col(' yhat')
res = np.array(self.GS_F.by_col(' residual'))
std_res = np.array(self.GS_F.by_col(' std_residual')).reshape((-1,1))
localR2 = np.array(self.GS_F.by_col(' localR2')).reshape((-1,1))
inf = np.array(self.GS_F.by_col(' influence')).reshape((-1,1))
cooksD = np.array(self.GS_F.by_col(' CooksD')).reshape((-1,1))
model = GWR(self.coords, self.y, self.X, bw=87308.298,
kernel='gaussian', fixed=True)
rslt = model.fit()
AICc = get_AICc(rslt)
AIC = get_A
|
Vdragon/git-cola
|
cola/difftool.py
|
Python
|
gpl-2.0
| 6,355
| 0.000157
|
from __future__ import division, absolute_import, unicode_literals
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from . import cmds
from . import gitcmds
from . import hotkeys
from . import icons
from . import qtutils
from . import utils
from .i18n import N_
from .widgets import completion
from .widgets import defs
from .widgets import filetree
from .widgets import standard
def diff_commits(parent, a, b, context=None):
"""Show a dialog for diffing two commits"""
dlg = Difftool(parent, a=a, b=b, context=context)
dlg.show()
dlg.raise_()
return dlg.exec_() == QtWidgets.QDialog.Accepted
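# Usage sketch (hypothetical parent widget and refs):
#     diff_commits(main_window, 'HEAD~1', 'HEAD')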
def diff_expression(parent, expr,
create_widget=False, hide_expr=False,
                    focus_tree=False, context=None):
"""Show
|
a diff dialog for diff expressions"""
dlg = Difftool(parent,
expr=expr, hide_expr=hide_expr,
focus_tree=focus_tree, context=context)
if create_widget:
return dlg
dlg.show()
dlg.raise_()
return dlg.exec_() == QtWidgets.QDialog.Accepted
class Difftool(standard.Dialog):
def __init__(self, parent, a=None, b=None, expr=None, title=None,
hide_expr=False, focus_tree=False, context=None):
"""Show files with differences and launch difftool"""
standard.Dialog.__init__(self, parent=parent)
self.a = a
self.b = b
self.diff_expr = expr
self.context = context
if title is None:
title = N_('git-cola diff')
self.setWindowTitle(title)
self.setWindowModality(Qt.WindowModal)
self.expr = completion.GitRefLineEdit(parent=self)
if expr is not None:
self.expr.setText(expr)
if expr is None or hide_expr:
self.expr.hide()
self.tree = filetree.FileTree(parent=self)
self.diff_button = qtutils.create_button(text=N_('Compare'),
icon=icons.diff(),
enabled=False,
default=True)
self.diff_button.setShortcut(hotkeys.DIFF)
self.diff_all_button = qtutils.create_button(text=N_('Compare All'),
icon=icons.diff())
self.edit_button = qtutils.edit_button()
self.edit_button.setShortcut(hotkeys.EDIT)
self.close_button = qtutils.close_button()
self.button_layout = qtutils.hbox(defs.no_margin, defs.spacing,
self.close_button,
qtutils.STRETCH,
self.edit_button,
self.diff_all_button,
self.diff_button)
self.main_layout = qtutils.vbox(defs.margin, defs.spacing,
self.expr, self.tree,
self.button_layout)
self.setLayout(self.main_layout)
self.tree.itemSelectionChanged.connect(self.tree_selection_changed)
self.tree.itemDoubleClicked.connect(self.tree_double_clicked)
self.tree.up.connect(self.focus_input)
self.expr.textChanged.connect(self.text_changed)
self.expr.activated.connect(self.focus_tree)
self.expr.down.connect(self.focus_tree)
self.expr.enter.connect(self.focus_tree)
qtutils.connect_button(self.diff_button, self.diff)
qtutils.connect_button(self.diff_all_button,
lambda: self.diff(dir_diff=True))
qtutils.connect_button(self.edit_button, self.edit)
qtutils.connect_button(self.close_button, self.close)
qtutils.add_action(self, 'Focus Input', self.focus_input, hotkeys.FOCUS)
qtutils.add_action(self, 'Diff All', lambda: self.diff(dir_diff=True),
hotkeys.CTRL_ENTER, hotkeys.CTRL_RETURN)
qtutils.add_close_action(self)
self.init_state(None, self.resize_widget, parent)
self.refresh()
if focus_tree:
self.focus_tree()
def resize_widget(self, parent):
"""Set the initial size of the widget"""
width, height = qtutils.default_size(parent, 720, 420)
self.resize(width, height)
def focus_tree(self):
"""Focus the files tree"""
self.tree.setFocus()
def focus_input(self):
"""Focus the expression input"""
self.expr.setFocus()
def text_changed(self, txt):
self.diff_expr = txt
self.refresh()
def refresh(self):
"""Redo the diff when the expression changes"""
if self.diff_expr is not None:
self.diff_arg = utils.shell_split(self.diff_expr)
elif self.b is None:
self.diff_arg = [self.a]
else:
self.diff_arg = [self.a, self.b]
self.refresh_filenames()
def refresh_filenames(self):
if self.a and self.b is None:
filenames = gitcmds.diff_index_filenames(self.a)
else:
filenames = gitcmds.diff(self.diff_arg)
self.tree.set_filenames(filenames, select=True)
def tree_selection_changed(self):
has_selection = self.tree.has_selection()
self.diff_button.setEnabled(has_selection)
self.diff_all_button.setEnabled(has_selection)
def tree_double_clicked(self, item, column):
path = self.tree.filename_from_item(item)
left, right = self._left_right_args()
cmds.difftool_launch(left=left, right=right, paths=[path],
context=self.context)
def diff(self, dir_diff=False):
paths = self.tree.selected_filenames()
left, right = self._left_right_args()
cmds.difftool_launch(left=left, right=right, paths=paths,
dir_diff=dir_diff, context=self.context)
def _left_right_args(self):
if self.diff_arg:
left = self.diff_arg[0]
else:
left = None
if len(self.diff_arg) > 1:
right = self.diff_arg[1]
else:
right = None
return (left, right)
def edit(self):
paths = self.tree.selected_filenames()
cmds.do(cmds.Edit, paths)
|
thePetrMarek/SequenceOfDigitsRecognition
|
sequences_of_variable_length/deep_localization_weighted_loss_variable_length_deeper.py
|
Python
|
mit
| 7,054
| 0.00241
|
import tensorflow as tf
'''
Model for sequence classification and localization with weighted loss
'''
class DeepLocalizationWeightedLossVariableLengthDeeper:
def get_name(self):
return "deep_localization_weighted_loss_variable_length_6"
def input_placeholders(self):
inputs_placeholder = tf.placeholder(tf.float32, shape=[None, 128, 256], name="inputs")
labels_placeholder = tf.placeholder(tf.float32, shape=[None, 5, 11], name="labels")
positions_placeholder = tf.placeholder(tf.float32, shape=[None, 4], name="positions")
keep_prob_placeholder = tf.placeholder(tf.float32)
is_training_placeholder = tf.placeholder(tf.bool)
return inputs_placeholder, labels_placeholder, positions_placeholder, keep_prob_placeholder, is_training_placeholder
def inference(self, input, keep_prob, is_training):
with tf.name_scope("inference"):
input = tf.reshape(input, [-1, 128, 256, 1])
conv1 = self._convolutional(input, [10, 10, 1, 8])
relu1 = self._relu(conv1)
max_pool1 = self._max_pooling(relu1, [1, 2, 2, 1], [1, 2, 2, 1])
conv2 = self._convolutional(max_pool1, [8, 8, 8, 14])
relu2 = self._relu(conv2)
max_pool2 = self._max_pooling(relu2, [1, 2, 2, 1], [1, 2, 2, 1])
conv3 = self._convolutional(max_pool2, [6, 6, 14, 20])
relu3 = self._relu(conv3)
max_pool3 = self._max_pooling(relu3, [1, 2, 2, 1], [1, 2, 2, 1])
conv4 = self._convolutional(max_pool3, [4, 4, 20, 24])
relu4 = self._relu(conv4)
max_pool4 = self._max_pooling(relu4, [1, 2, 2, 1], [1, 2, 2, 1])
conv5 = self._convolutional(max_pool4, [2, 2, 24, 32])
relu5 = self._relu(conv5)
max_pool5 = self._max_pooling(relu5, [1, 2, 2, 1], [1, 2, 2, 1])
conv6 = self._convolutional(max_pool5, [2, 2, 32, 128])
relu6 = self._relu(conv6)
            max_pool6 = self._max_pooling(relu6, [1, 2, 2, 1], [1, 2, 2, 1])
reshaped = tf.reshape(max_pool6, [-1, 1024])
logits = []
gru = tf.contrib.rnn.GRUCell(576)
state = gru.zero_state(tf.shape(reshaped)[0], tf.float32)
            with tf.variable_scope("RNN"):
for i in range(5):
if i > 0: tf.get_variable_scope().reuse_variables()
output, state = gru(reshaped, state)
number_logits = self._fully_connected(output, 576, 11)
logits.append(number_logits)
fc_position1 = self._fully_connected(reshaped, 1024, 768)
dropout_position_1 = tf.nn.dropout(fc_position1, keep_prob)
relu_position1 = self._relu(dropout_position_1)
fc_position2 = self._fully_connected(relu_position1, 768, 512)
dropout_position_2 = tf.nn.dropout(fc_position2, keep_prob)
relu_position2 = self._relu(dropout_position_2)
fc_position3 = self._fully_connected(relu_position2, 512, 256)
dropout_position_3 = tf.nn.dropout(fc_position3, keep_prob)
relu_position3 = self._relu(dropout_position_3)
fc_position4 = self._fully_connected(relu_position3, 256, 64)
dropout_position_4 = tf.nn.dropout(fc_position4, keep_prob)
relu_position4 = self._relu(dropout_position_4)
fc_position5 = self._fully_connected(relu_position4, 64, 32)
dropout_position_5 = tf.nn.dropout(fc_position5, keep_prob)
relu_position5 = self._relu(dropout_position_5)
predicted_positions = self._fully_connected(relu_position5, 32, 4)
return tf.stack(logits, axis=1), predicted_positions
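    # Shape trace for the stack above (batch dimension omitted): the
    # 128x256x1 input passes six conv + 2x2 max-pool stages, each halving
    # both spatial dimensions, leaving 2x4x128 = 1024 features per example,
    # which matches the reshape feeding the GRU.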
def loss(self, logits, labels, predicted_positions, positions):
with tf.name_scope("loss"):
labels = tf.to_int64(labels)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits, name="cross_entropy")
logits_loss = tf.reduce_mean(cross_entropy, name="cross_entropy_mean")
square_error = tf.square(positions - predicted_positions, name="square_error")
position_loss = tf.reduce_mean(square_error, name="square_error_mean")
total_loss = 1000 * logits_loss + position_loss
tf.summary.scalar("logits_loss", logits_loss)
tf.summary.scalar("positions_loss", position_loss)
tf.summary.scalar("total_loss", logits_loss + position_loss)
return {"logits_loss": logits_loss, "positions_loss": position_loss,
"total_loss": total_loss}
def training(self, loss, learning_rate):
with tf.name_scope("training"):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_operation = optimizer.minimize(loss)
return train_operation
def evaluation(self, logits, labels, predicted_positions, positions):
with tf.name_scope("evaluation"):
labels = tf.to_int64(labels)
labels = tf.argmax(labels, 2)
logits = tf.argmax(logits, 2)
difference = tf.subtract(labels, logits, name="sub")
character_errors = tf.count_nonzero(difference, axis=1, name="count_nonzero")
total_wrong_characters = tf.reduce_sum(character_errors)
total_characters = tf.to_int64(tf.size(labels))
total_correct_characters = total_characters - total_wrong_characters
corrects = tf.less_equal(character_errors, 0, name="is_zero")
position_error = tf.losses.mean_squared_error(positions, predicted_positions)
return self.tf_count(corrects,
True), corrects, logits, position_error, predicted_positions, total_correct_characters, total_characters
def tf_count(self, t, val):
elements_equal_to_value = tf.equal(t, val)
as_ints = tf.cast(elements_equal_to_value, tf.int32)
count = tf.reduce_sum(as_ints)
return count
def _fully_connected(self, input, size_in, size_out, name="fc"):
with tf.name_scope(name):
w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="b")
act = tf.matmul(input, w) + b
return act
def _convolutional(self, input, dimensions, name="conv"):
with tf.name_scope(name):
w = tf.Variable(tf.truncated_normal(dimensions, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[dimensions[3]]), name="b")
return tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='SAME') + b
def _max_pooling(self, input, ksize, strides, name="max_pooling"):
with tf.name_scope(name):
return tf.nn.max_pool(input, ksize, strides, padding="SAME")
def _relu(self, input, name="relu"):
with tf.name_scope(name):
return tf.nn.relu(input)
|
n3011/deeprl
|
train_dqn.py
|
Python
|
mit
| 1,042
| 0.00096
|
# -------------------------------------------------------------------#
# Released under the MIT license (https://opensource.org/licenses/MIT)
# Contact: mrinal.haloi11@gmail.com
# Enhancement Copyright 2016, Mrinal Haloi
# -------------------------------------------------------------------#
import random
import os
import tensorflow as tf
from core.solver import Solver
from env.environment import GymEnvironment, SimpleGymEnvironment
from config.config import cfg
# Set random seed
tf.set_random_seed(123)
random.seed(12345)
def main(_):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
if cfg.env_type == 'simple':
env = SimpleGymEnvironment(cfg)
else:
env = GymEnvironment(cfg)
        if not os.path.exists('/tmp/model_dir'):
os.mkdir('/tmp/model_dir')
solver = Solver(cfg, env, sess, '/tmp/model_dir')
solver.train()
if __name__ == '__main__':
tf.app.run()
|
firebitsbr/termineter
|
framework/modules/get_modem_info.py
|
Python
|
gpl-3.0
| 3,174
| 0.015123
|
# framework/modules/get_modem_info.py
#
# Copyright 2011 Spencer J. McIntyre <SMcIntyre [at] SecureState [dot] net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from c1218.errors import C1218ReadTableError
from c1219.access.telephone import C1219TelephoneAccess
from c1219.data import C1219_CALL_STATUS_FLAGS
from framework.templates import TermineterModuleOptical
class Module(TermineterModuleOptical):
def __init__(self, *args, **kwargs):
TermineterModuleOptical.__init__(self, *args, **kwargs)
self.version = 1
self.author = ['Spencer McIntyre']
self.description = 'Get Information About The Integrated Modem'
self.detailed_description = 'This module reads various C1219 tables from decade 90 to gather information about the integrated modem. If successfully parsed, useful information will be displayed.'
def run(self):
conn = self.frmwk.serial_connection
logger = self.logger
if not self.frmwk.serial_login(): # don't alert on failed logins
logger.warning('meter login failed')
try:
telephone_ctl = C1219TelephoneAccess(conn)
except C1218ReadTableError:
self.frmwk.print_error('Could not read necessary tables, a modem is not likely present')
return
conn.stop()
info = {}
info['Can Answer'] = telephone_ctl.can_answer
info['Extended Status Available'] = telephone_ctl.use_extended_status
info['Number of Originating Phone Numbers'] = telephone_ctl.nbr_originate_numbers
info['PSEM Identity'] = telephone_ctl.psem_identity
if telephone_ctl.global_bit_rate:
info['Global Bit Rate'] = telephone_ctl.global_bit_rate
else:
info['Originate Bit Rate'] = telephone_ctl.originate_bit_rate
			info['Answer Bit Rate'] = telephone_ctl.answer_bit_rate
info['Dial Delay'] = telephone_ctl.dial_delay
if len(telephone_ctl.prefix_number):
info['Prefix Number'] = telephone_ctl.prefix_number
keys = info.keys()
keys.sort()
self.frmwk.print_status('General Information:')
fmt_string = " {0:.<38}.{1}"
for key in keys:
self.frmwk.print_status(fmt_string.format(key, info[key]))
self.frmwk.print_status('Stored Telephone Numbers:')
fmt_string = " {0:<6} {1:<16} {2:<32}"
self.frmwk.print_status(fmt_string.format('Index', 'Number', 'Last Status'))
self.frmwk.print_status(fmt_string.format('-----', '------', '-----------'))
for idx, entry in telephone_ctl.originating_numbers.items():
self.frmwk.print_status(fmt_string.format(entry['idx'], entry['number'].strip(), C1219_CALL_STATUS_FLAGS[entry['status']]))
|
ray-project/ray
|
rllib/utils/exploration/slate_soft_q.py
|
Python
|
apache-2.0
| 1,483
| 0.000674
|
from typing import Union
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import TensorType
from ray.rllib.utils.exploration.soft_q import SoftQ
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class SlateSoftQ(SoftQ):
@override(SoftQ)
def get_exploration_action(
self,
        action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True,
):
assert (
self.framework == "torch"
), "ERROR: SlateSoftQ only supports torch so far!"
cls = type(action_distribution)
# Re-create the action distribution with the correct temperature
# applied.
action_distribution = cls(
action_distribution.inputs, self.model, temperature=self.temperature
)
batch_size = action_distribution.inputs.size()[0]
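        # Log-probs are not computed for slate actions here; a zero vector is
        # returned so the (action, logp) return shape matches other explorers.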
action_logp = torch.zeros(batch_size, dtype=torch.float)
self.last_timestep = timestep
# Explore.
if explore:
# Return stochastic sample over (q-value) logits.
action = action_distribution.sample()
# Return the deterministic "sample" (argmax) over (q-value) logits.
else:
action = action_distribution.deterministic_sample()
return action, action_logp
|
pligor/predicting-future-product-prices
|
04_time_series_prediction/data_providers/price_history_pack.py
|
Python
|
agpl-3.0
| 2,640
| 0.003788
|
import numpy as np
class PriceHistoryPack(object):
def __init__(self, input_seq_len, num_features, target_seq_len):
super(PriceHistoryPack, self).__init__()
self.sku_ids = []
self.XX = np.empty((0, input_seq_len, num_features))
self.YY = np.empty((0, target_seq_len))
self.sequence_lens = []
self.seq_mask = np.empty((0, input_seq_len))
def update(self, sku_id, inputs, targets, input_seq_len):
self.sku_ids.append(sku_id)
inputs_len = len(inputs)
self.sequence_lens.append(inputs_len)
# build current mask with zeros and ones
cur_mask = np.zeros(input_seq_len)
cur_mask[:inputs_len] = 1 # only the valid firsts should have the value of one
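        # e.g. input_seq_len == 5 with a 3-step series gives cur_mask == [1, 1, 1, 0, 0]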
xx_padded = np.pad(inputs, ((0, input_seq_len - inputs_len), (0, 0)), mode='constant', constant_values=0.)
# here targets do NOT need to be padded because we do not have a sequence to sequence model
# yy_padded = np.pad(targets, (0, series_max_len - len(targets)), mode='constant', constant_values=0.)
assert len(xx_padded) == input_seq_len
self.XX = np.vstack((self.XX, xx_padded[np.newaxis]))
self.YY = np.vstack((self.YY, targets[np.newaxis]))
self.seq_mask = np.vstack((self.seq_mask, cur_mask[np.newaxis]))
def get_data(self, fraction=None, random_state=None):
# from sklearn.model_selection import train_test_split
        skuIds, xx, yy, seqLens, seqMask = np.array(self.sku_ids), self.XX, self.YY, np.array(
self.sequence_lens), self.seq_mask
if fraction is None:
return skuIds, xx, yy, seqLens, seqMask
else:
            random_state = np.random if random_state is None else random_state
cur_len = len(skuIds)
assert cur_len == len(xx) and cur_len == len(yy) and cur_len == len(seqLens) and cur_len == len(seqMask)
random_inds = random_state.choice(cur_len, int(cur_len * fraction))
return skuIds[random_inds], xx[random_inds], yy[random_inds], seqLens[random_inds], seqMask[random_inds]
def save(self, filepath, fraction=None, random_state=None):
if fraction is None:
np.savez(filepath, sku_ids=self.sku_ids, inputs=self.XX, targets=self.YY,
sequence_lengths=self.sequence_lens,
sequence_masks=self.seq_mask)
else:
skuIds, xx, yy, seqLens, seqMask = self.get_data(fraction=fraction, random_state=random_state)
np.savez(filepath, sku_ids=skuIds, inputs=xx, targets=yy, sequence_lengths=seqLens, sequence_masks=seqMask)
|
hakril/PythonForWindows
|
tests/test_midl.py
|
Python
|
bsd-3-clause
| 380
| 0.013158
|
import windows.generated_def as gdef
def test_format_charactere_values():
assert gdef.FC_ZERO == 0
assert gdef.FC_PAD == 0x5c
assert gdef.FC_PAD == 0x5c
assert gdef.FC_SPLIT_DEREFERENCE == 0x74
    assert gdef.FC_SPLIT_DIV_2 == 0x75
    assert gdef.FC_HARD_STRUCT == 0xb1
assert gdef.FC_TRANSMIT_AS_PTR == 0xb2
assert gdef.FC_END_OF_UNIVERSE == 0xba
|
jansohn/pyload
|
module/plugins/hoster/HighWayMe.py
|
Python
|
gpl-3.0
| 2,588
| 0.008114
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
from module.plugins.internal.SimpleHoster import seconds_to_midnight
class HighWayMe(MultiHoster):
__name__ = "HighWayMe"
__type__ = "hoster"
__version__ = "0.15"
__status__ = "testing"
__pattern__ = r'https?://.+high-way\.my'
__config__ = [("use_premium" , "bool", "Use premium account if available" , True),
("revertfailed", "bool", "Revert to standard download if fails", True)]
__description__ = """High-Way.me multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("EvolutionClip", "evolutionclip@live.de")]
def setup(self):
self.chunk_limit = 4
def check_errors(self):
        if self.html.get('code') == 302:  #@NOTE: This is not working. It should be: if "302 Moved Temporarily" then... but I don't know how to implement it.
self.account.relogin()
self.retry()
elif "<code>9</code>" in self.html:
self.offline()
elif "downloadlimit" in self.html:
self.log_warning(_("Reached maximum connctions"))
self.retry(5, 60, _("Reached maximum connctions"))
elif "trafficlimit" in self.html:
self.log_warning(_("Reached daily limit"))
self.retry(wait=seconds_to_midnight(), msg="Daily limit for this host reached")
elif "<code>8</code>" in self.html:
            self.log_warning(_("Hoster temporarily unavailable, waiting 1 minute and retry"))
self.retry(5, 60, _("Hoster is temporarily unavailable"))
def handle_premium(self, pyfile):
for _i in xrange(5):
            self.html = self.load("https://high-way.me/load.php",
                                  get={'link': self.pyfile.url})
if self.html:
self.log_debug("JSON data: " + self.html)
break
else:
self.log_info(_("Unable to get API data, waiting 1 minute and retry"))
self.retry(5, 60, _("Unable to get API data"))
self.check_errors()
try:
self.pyfile.name = re.search(r'<name>([^<]+)</name>', self.html).group(1)
except AttributeError:
self.pyfile.name = ""
try:
self.pyfile.size = re.search(r'<size>(\d+)</size>', self.html).group(1)
except AttributeError:
self.pyfile.size = 0
self.link = re.search(r'<download>([^<]+)</download>', self.html).group(1)
getInfo = create_getInfo(HighWayMe)
|
wevoice/wesub
|
apps/activity/migrations/0002_auto__add_activitymigrationprogress.py
|
Python
|
agpl-3.0
| 21,651
| 0.00799
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ActivityMigrationProgress'
db.create_table('activity_activitymigrationprogress', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('last_migrated_id', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('activity', ['ActivityMigrationProgress'])
def backwards(self, orm):
# Deleting model 'ActivityMigrationProgress'
db.delete_table('activity_activitymigrationprogress')
models = {
'activity.activitymigrationprogress': {
'Meta': {'object_name': 'ActivityMigrationProgress'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_migrated_id': ('django.db.models.fields.IntegerField', [], {})
},
'activity.activityrecord': {
'Meta': {'object_name': 'ActivityRecord'},
'copied_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['activity.ActivityRecord']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 5, 19, 0, 0)', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'related_obj_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'activity'", 'null': 'True', 'to': "orm['teams.Team']"}),
'type': ('codefield.CodeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'activity'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'activity'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'})
},
'activity.urledit': {
'Meta': {'object_name': 'URLEdit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'blank': 'True'}),
'old_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'blank': 'True'})
},
'activity.videodeletion': {
'Meta': {'object_name': 'VideoDeletion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'blank': 'True'})
},
        'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_users'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'pay_rate_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'show_tutorial': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'ob
|
kozyarchuk/NCT-workers
|
tests/perf/pscrap.py
|
Python
|
gpl-2.0
| 1,216
| 0.011513
|
from nct.utils.alch import Session, LSession
from nct.domain.instrument import Instrument
import random
import functools
import time
from nct.deploy.deploy import Deployer
import cProfile
INSTRUMENTS = ['GOOGL.O', 'TWTR.N', 'GS.N', 'BAC.N', 'IBM.N']
def profile_method(file_name = None):
def gen_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
f = func
cProfile.runctx('f(*args,**kwargs)', globals(), locals(), file_name)
print("Done writing")
return wrapper
return gen_wrapper
def time_it(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
func(*args,**kwargs)
print("It took {}".format(time.time() - start))
return wrapper
HASH_CACHE = {}
@profile_method(r"c:\temp\instrument_123.out")
def do_a_bunch():
s = LSession()
name = INSTRUMENTS[int(random.random()*100)%len(INSTRUMENTS)]
instr_id = s.query(Instrument).filter_by(name=name).one().id
for _ in range(10000):
s.query(Instrument).get(instr_id)
s.close()
import sys
print (sys.version)
Deployer(LSession).deploy()
print ("Deployed")
for _ in range(1):
do_a_bunch()
|
mcmaxwell/idea_digital_agency
|
idea/feincms/module/extensions/ct_tracker.py
|
Python
|
mit
| 255
| 0
|
# flake8: noqa
from __future__ import absolute_import, unicode_literals
import warnings
from feincms.extensions.ct_tracker import *
warnings.warn(
'Import %s from feincms.extensions.%s' % (__name__, __name__),
    DeprecationWarning, stacklevel=2)
|
Ripsnorta/pyui2
|
widgets/formpanel.py
|
Python
|
lgpl-2.1
| 7,289
| 0.006311
|
# pyui2
# Copyright (C) 2001-2002 Sean C. Riley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pyui2
from pyui2.desktop import getDesktop, getTheme
from pyui2.panel import Panel
from pyui2.layouts import Much
class FormPanel(Panel):
"""A Panel that shows data about an object and allows it to be updated.
The "fields" argument is a list of data fields to populate the panel with. It
is in the format:
[ (type, name, label, vspan, data),
(type, name, label, vspan, data)
]
    where type is one of the fieldTypes below, vspan is the vertical height of the widget,
    and data is specific data for the type of form widget to be used.
"""
fieldTypes = [
"string",
"int",
"float",
"text",
"password",
"slider",
"checkbox",
"list",
"dropdownlist",
"label"
]
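
    # A minimal, hypothetical field list illustrating the format documented
    # above (names, labels and per-type data values are invented):
    #
    #   fields = [
    #       ("string",   "name",   "Name",   1, 20),     # Edit, 20 chars wide
    #       ("int",      "age",    "Age",    1, None),   # NumberEdit (data unused)
    #       ("checkbox", "active", "Active", 1, "On?"),  # CheckBox with title
    #   ]
    #   panel = FormPanel(fields)
    #   panel.populate(someObject)   # copy someObject's attributes into the widgets
    #   panel.process()              # write edited values back onto someObject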
def __init__(self, fieldList):
self.fieldList = fieldList
Panel.__init__(self)
self.object = None
# setup layout
num = 0
span = 0
for t, n, l, vspan, d in fieldList:
span = span + vspan
self.setLayout(pyui2.layouts.TableLayoutManager( 3, span))
for fieldType, fieldName, fieldLabel, fieldSpan, fieldData in fieldList:
newLabel = Label(fieldLabel)
newWidget = self.createFormWidget(fieldType, fieldData)
            self.addChild( newLabel, (0,num,1,fieldSpan) )
self.addChild( newWidget, (1,num,2,fieldSpan) )
self.__dict__["label_%s" % fieldName] = newLabel
self.__dict__["widget_%s" % fi
|
eldName] = newWidget
num = num + fieldSpan
self.pack()
def populate(self, object):
"""populate the data fields from the supplied object
"""
self.object = object
for fieldType, fieldName, fieldLabel, fieldSpan, fieldDefault in self.fieldList:
formWidget = self.__dict__["widget_%s" % fieldName]
value = object.__dict__.get(fieldName, None)
self.populateFormWidget(fieldType, formWidget, value)
self.setDirty(1)
def process(self):
"""This takes the data in the form and updates it into the source object.
This assumes that the form has already been populated...
"""
for fieldType, fieldName, fieldLabel, fieldSpan, fieldData in self.fieldList:
formWidget = self.__dict__["widget_%s" % fieldName]
self.processFormWidget(fieldType, fieldName, formWidget)
def createFormWidget(self, fieldType, fieldData):
"""Create the right kind of widget based on the fieldType.
"""
tmp = "create_%s" % fieldType
createMethod = getattr(self, tmp)
if not createMethod:
raise "No widget of type: %s" % tmp
return createMethod(fieldData)
def populateFormWidget(self, fieldType, formWidget, value):
tmp = "populate_%s" % fieldType
populateMethod = getattr(self, tmp)
if not populateMethod:
raise "No widget of type: %s" % fieldType
return populateMethod(formWidget, value)
def processFormWidget(self, fieldType, fieldName, formWidget):
if not self.object:
raise "No object to process to!"
tmp = "process_%s" % fieldType
processMethod = getattr(self, tmp)
if not processMethod:
raise "No process method for %s" % fieldType
return processMethod(formWidget, fieldName)
##### Widget Creation Methods. #####
def create_string(self, size):
return Edit("", size, self._pyui2Edit)
def create_password(self, size):
return Password("", size, self._pyui2Edit)
def create_int(self, dummy):
return NumberEdit("", 12, self._pyui2Edit, 0)
def create_float(self, dummy):
return NumberEdit("", 12, self._pyui2Edit, 0)
def create_text(self, size):
#NOTE: make this a LineDisplay that can be edited...
return Edit("", size, self._pyui2Edit)
def create_slider(self, range):
return SliderBar(self._pyui2Slide, range)
def create_checkbox(self, title):
return CheckBox(title, self._pyui2Check)
def create_list(self, dummy):
return ListBox()
def create_dropdownlist(self, numVisible):
return DropDownBox(numVisible)
def create_label(self, dummy):
return Label("")
###### Widget Populate Methods. #######
def populate_string(self, formWidget, value):
if not value:
formWidget.setText("None")
else:
formWidget.setText("%s" % value)
def populate_float(self, formWidget, value):
if not value:
formWidget.setText("None")
else:
formWidget.setText("%.2f" % value)
populate_password = populate_string
populate_int = populate_string
populate_text = populate_string
populate_label = populate_string
def populate_slider(self, formWidget, value):
formWidget.position = value
def populate_checkbox(self, formWidget, value):
formWidget.setCheck(value)
def populate_list(self, formWidget, items):
#TODO: make a way to get a text value for an item
formWidget.clear()
for item in items:
formWidget.addItem(repr(item), item)
populate_dropdownlist = populate_list
##### Widget Processing Methods #####
def process_string(self, formWidget, fieldName):
setattr(self.object, fieldName, formWidget.text)
process_text = process_string
process_password = process_string
def process_label(self, formWidget, fieldName):
pass
def process_list(self, formWidget, fieldName):
pass
process_dropdownlist = process_list
def process_slider(self, formWidget, fieldName):
setattr(self.object, fieldName, formWidget.position)
def process_checkbox(self, formWidget, fieldName):
setattr(self.object, fieldName, formWidget.checkState)
def process_int(self, formWidget, fieldName):
setattr(self.object, fieldName, int(formWidget.text) )
def process_float(self, formWidget, fieldName):
setattr(self.object, fieldName, float(formWidget.text) )
##### Widget handler methods ######
def _pyui2Slide(self, value):
#print "slid to ", value
pass
def _pyui2Edit(self, edit):
#print "changing value for ", edit
return 1
def _pyui2Check(self, value):
#print "checkbox hit"
pass
|
pglivebackup/pgchain
|
pgchain.py
|
Python
|
mit
| 26,324
| 0.02454
|
#!/bin/python
import sys,os,sqlite3,time,ntpath,psycopg2,grp,pwd
from random import randint
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def is_folder_belongs_to_postgres(folderPath):
stat_info = os.stat(folderPath)
uid = stat_info.st_uid
gid = stat_info.st_gid
user = pwd.getpwuid(uid)[0]
group = grp.getgrgid(gid)[0]
r = 0
if ((str(user).lower() == "postgres") and (str(group).lower() == "postgres")):
r = 1
return r
print ""
print " PG_CHAIN v2017.10 (MIT License)"
print " Created by Doron Yaary (pglivebackup@gmail.com)"
if ((len(sys.argv) == 2) and (str(sys.argv[1]).lower() == "--help")):
print color.BOLD + " ------------------------------------------------------------------
|
----------------------" + color.END
print color.BOLD + " PGCHAIN Help" + color.END
print color.BOLD + " ----------------------------------------------------------------------------------------" + color.END
print ""
print " " + color.UNDERLINE + "General PGCHAIN Usage Syntax:" + color.END
print " ./pgchain.py [COMMAND] [ARGUMENTS]"
print ""
print " " + color.UNDERLINE + "Available Commands:" + color.END
print " " + color.BOLD + "base-backup " + color.END + " - Creates a base backup of the local PostgreSQL cluster."
print " " + color.BOLD + "get-wal " + color.END + " - Used in the 'archive_command' for WAL files automation."
print " " + color.BOLD + "list-chains " + color.END + " - Lists the available backup chains (base backup & WAL files)."
print " " + color.BOLD + "clear-history" + color.END + " - Releases old backup chains (and deletes them from disk)."
print " " + color.BOLD + "restore-chain" + color.END + " - Restores the requested chain to the local PostgreSQL cluster."
print " " + color.BOLD + "chain-info " + color.END + " - Displays information abou the requested chain."
print " " + color.BOLD + "show-config " + color.END + " - Displays the configuration information summary."
print " " + color.BOLD + "clear-log " + color.END + " - Clears (truncates) the log file."
print " " + color.BOLD + "create-repo " + color.END + " - Creates the PGCHAIN repository."
print " " + color.BOLD + "keep-recent " + color.END + " - Keeps the most recent backups (according to the given argument)."
print ""
sys.exit(0)
con = None
internal_db_path = ""
internal_pgctl_path = ""
internal_log_enabled = ""
# The following line needs to be changed by you (see installation notes on GitHub)
internal_home_folder = "/pg_chain/"
print ""
if ((len(sys.argv) == 2) and (str(sys.argv[1]).lower() == "create-repo")):
if (os.path.isfile(internal_home_folder + "pgchain.db") == True):
print " " + color.BOLD + "ERROR:" + color.END + " The repository file (pgchain.db) already exists."
print " INFO: If you plan on this name after all, please backup the current one and move it elsewhere first."
print ""
sys.exit(0)
print " " + color.BOLD + "Please Confirm:" + color.END
print " --------------------------------------------------------------"
print " This will create the repository database by using the 'sqlite3' command."
print " The repostiroty database will be created here: " + color.UNDERLINE + str(internal_home_folder) + "pgchain.db" + color.END
ap = raw_input(" Please approve (Y/N): ")
ap = ap.lower()
if (ap != "y"):
print ""
print " You did not approve - nothing changed/created. Quiting."
print ""
sys.exit(0)
print ""
sql = " "
sql = sql + "CREATE TABLE chain_sequence (seq_next_id int not null); "
sql = sql + "CREATE TABLE chains (chain_id int not null, base_backup_full_path varchar(512) not null, chain_start_timestamp datetime not null); "
sql = sql + "CREATE TABLE file_sequence (file_next_id int not null); "
sql = sql + "CREATE TABLE wal_files (file_id int not null, file_full_path varchar(512) not null, file_timestamp datetime not null, file_size_mb int not null); "
sql = sql + "CREATE TABLE chain_files (file_id int not null, parent_chain_id int not null, file_type char(1) not null, file_timestamp datetime not null, file_full_path varchar(512), file_size_mb int); "
sql = sql + "INSERT INTO file_sequence VALUES (1001); "
sql = sql + "INSERT INTO chain_sequence VALUES (1001); "
print ""
print " Creating repository..."
os.system("echo '" + str(sql) + "' | sqlite3 " + str(internal_home_folder) + "pgchain.db")
print " Done."
print ""
sys.exit(0)
if (os.path.isfile(internal_home_folder + "pgchain.conf") == False):
print " " + color.BOLD + "ERROR:" + color.END + " The configuration files could not be found (pgchain.conf)"
print " HINT: Read the documentation regarding the configuration file."
print ""
sys.exit(0)
with open(internal_home_folder + "pgchain.conf") as f:
for line in f:
if (line != ""):
if not line.startswith("#"):
v = line.rstrip()
if (v.lower().startswith("db_path=")):
internal_db_path = v.replace("db_path=","")
if (os.path.isfile(internal_db_path) == False):
print " " + color.BOLD + "ERROR:" + color.END + " The repository file (db file) could not be found."
print " HINT: The configuration file directs to: " + internal_db_path
print " READ: If you never created the repository please use the 'create-repo' argument first."
print ""
sys.exit(0)
try:
con = sqlite3.connect(internal_db_path)
except:
print " " + color.BOLD + "ERROR:" + color.END + " Could not open the database file (unknown reason)"
print " HINT: The configuration file directs to: " + internal_db_path
print ""
sys.exit(0)
if (v.lower().startswith("pgctl_path=")):
internal_pgctl_path = v.replace("pgctl_path=","")
if (os.path.isfile(internal_pgctl_path) == False):
print " " + color.BOLD + "ERROR:" + color.END + " The path for PG_CTL is wrong (in the configuration file)."
print ""
sys.exit(0)
if (v.lower().startswith("log_enabled=")):
internal_log_enabled = v.replace("log_enabled=","")
if ((internal_log_enabled != "1") and (internal_log_enabled != "0")):
print " " + color.BOLD + "ERROR:" + color.END + " the log enabled/disabled parameter value is invalid."
print " HINT: Should be 0 or 1 - the given value is: " + internal_log_enabled
print ""
sys.exit(0)
'''
if (v.lower().startswith("home_folder=")):
internal_home_folder = v.replace("home_folder=","")
if (os.path.isdir(internal_home_folder) == False):
print " " + color.BOLD + "ERROR:" + color.END + " the home folder parameter value is invalid."
print " HINT: The given folder (" + internal_home_folder + ") is not a folder..."
print ""
sys.exit(0)
if (is_folder_belongs_to_postgres(internal_home_folder) == 0):
print " " + color.BOLD + "ERROR:" + color.END + " The home folder does not belong to the user postgres."
print " HINT: This can be fixed by running 'sudo chown -R postgres:postgres " + internal_home_folder + "'."
print ""
sys.exit(0)
if (internal_home_folder.endswith("/") == True):
internal_home_folder = internal_home_folder[:-1]
'''
# The following two lines are for backward compatibility and will be removed in future versions
is_nolog = int(internal_log_enabled)
conf_pg_ctl = internal_pgctl_path
# ---------------------------------------------------------------------------------------------
def adjust_string_size(mystring,maxlength):
a = ""
if (mystring == None):
a = ""
if (mystring != None):
a = mystring
while (len(a) < maxlength):
a = a + str(" ")
return a
def report_log_line(logline):
ts = ""
ts = str(time.strftime("%x")) + " " + str(time.strftime("%X"))
os.system("echo '" + str(ts) + ": " + str(logline) + "' >> " + internal_home_folder + "pgchain.log")
return 0
if (len(sys.argv) < 2):
print " ERROR: Bad arguments or missing arguments."
print ""
con.close()
sys.exit(0)
if (str(sys.
|
allotria/intellij-community
|
python/testData/debug/stepping/test_smart_step_into_native_function_in_return.py
|
Python
|
apache-2.0
| 99
| 0
|
def f(s):
s = s[::-1]
    return s.swapcase()
result = f(f(f(f(f('abcdef'))))) # breakpoint
|
bbengfort/TextBlob
|
textblob/nltk/chat/__init__.py
|
Python
|
mit
| 1,546
| 0.001294
|
# Natural Language Toolkit: Chatbots
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Steven Bird <stevenbird1@gmail.com>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Based on an Eliza implementation by Joe Strout <joe@strout.net>,
# Jeff Epler <jepler@inetnebr.com> and Jez Higgins <jez@jezuk.co.uk>.
"""
A class for simple chatbots. These perform simple pattern matching on sentences
typed by users, and respond with automatically generated sentences.
These chatbots may not work using the windows command line or the
windows IDLE GUI.
"""
from __future__ import print_function
from .util import Chat
from .eliza import eliza_chat
from .iesha import iesha_chat
from .rude import rude_chat
from .suntsu import suntsu_chat
from .zen import zen_chat
bots = [
    (eliza_chat, 'Eliza (psycho-babble)'),
(iesha_chat, 'Iesha (teen anime junky)'),
(rude_chat, 'Rude (abusive bot)'),
(suntsu_chat, 'Suntsu (Chinese sayings)'),
(zen_chat, 'Zen (gems of wisdom)')]
def chatbots():
import sys
print('Which chatbot would you like to talk to?')
botcount = len(bots)
for i in range(botcount):
print(' %d: %s' % (i+1, bots[i][1]))
while True:
print('\nEnter a number in the range 1-%d: ' % botcount, end=' ')
choice = sys.stdin.readline().strip()
if choice.isdigit() and (int(choice) - 1) in range(botcount):
break
else:
print(' Error: bad chatbot number')
chatbot = bots[int(choice)-1][0]
chatbot()
|
jarv/cmdchallenge-site
|
lambda_src/runcmd/dockerpycreds/__init__.py
|
Python
|
mit
| 116
| 0.008621
|
# flake8: noqa
from .store import Store
from .errors import StoreError, CredentialsNotFound
from .constants import *
|
kumar303/rockit
|
vendor-local/boto/rds/dbsnapshot.py
|
Python
|
bsd-3-clause
| 2,724
| 0.001468
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class DBSnapshot(object):
"""
Represents a RDS DB Snapshot
"""
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
self.engine = None
self.snapshot_create_time = None
self.instance_create_time = None
self.port = None
self.status = None
self.availability_zone = None
self.main_username = None
self.allocated_storage = None
self.instance_id = None
self.availability_zone = None
def __repr__(self):
return 'DBSnapshot:%s' % self.id
def startElement(self, name, attrs, connection):
pass
    def endElement(self, name, value, connection):
if name == 'Engine':
self.engine = value
elif name == 'InstanceCreateTime':
self.instance_create_time = value
elif name == 'SnapshotCreateTime':
self.snapshot_create_time = value
        elif name == 'DBInstanceIdentifier':
self.instance_id = value
elif name == 'DBSnapshotIdentifier':
self.id = value
elif name == 'Port':
self.port = int(value)
elif name == 'Status':
self.status = value
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'MainUsername':
self.main_username = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
elif name == 'SnapshotTime':
self.time = value
else:
setattr(self, name, value)
|
ArcticWarriors/scouting-app
|
ScoutingWebsite/Scouting2017/view/submissions/submit_bookmark.py
|
Python
|
mit
| 299
| 0
|
'''
Created on Mar 1, 2017
@author: PJ
'''
from Scouting2017.model.reusable_models import Team
from BaseScouting.views.submissions.submit_bookmark import BaseUpdateBookmarks
class UpdateBookmarks2017(BaseUpdateBookmarks):
    def __init__(self):
BaseUpdateBookmarks.__init__(self, Team)
|
jamespcole/home-assistant
|
homeassistant/components/ring/sensor.py
|
Python
|
apache-2.0
| 6,215
| 0
|
"""
This component provides HA sensor support for Ring Door Bell/Chimes.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ring/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from . import ATTRIBUTION, DATA_RING, DEFAULT_ENTITY_NAMESPACE
DEPENDENCIES = ['ring']
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
# Sensor types: Name, category, units, icon, kind
SENSOR_TYPES = {
'battery': [
'Battery', ['doorbell', 'stickup_cams'], '%', 'battery-50', None],
'last_activity': [
'Last Activity', ['doorbell', 'stickup_cams'], None, 'history', None],
'last_ding': [
'Last Ding', ['doorbell'], None, 'history', 'ding'],
'last_motion': [
'Last Motion', ['doorbell', 'stickup_cams'], None,
'history', 'motion'],
'volume': [
'Volume', ['chime', 'doorbell', 'stickup_cams'], None,
'bell-ring', None],
'wifi_signal_category': [
'WiFi Signal Category', ['chime', 'doorbell', 'stickup_cams'], None,
'wifi', None],
'wifi_signal_strength': [
'WiFi Signal Strength', ['chime', 'doorbell', 'stickup_cams'], 'dBm',
'wifi', None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE):
cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Ring device."""
ring = hass.data[DATA_RING]
sensors = []
for device in ring.chimes: # ring.chimes is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'chime' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
for device in ring.doorbells: # ring.doorbells is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'doorbell' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
for device in ring.stickup_cams: # ring.stickup_cams is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'stickup_cams' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingSensor(hass, device, sensor_type))
add_entities(sensors, True)
return True
class RingSensor(Entity):
"""A sensor implementation for Ring device."""
def __init__(self, hass, data, sensor_type):
"""Initialize a sensor for Ring device."""
super(RingSensor, self).__init__()
self._sensor_type = sensor_type
self._data = data
self._extra = None
self._icon = 'mdi:{}'.format(SENSOR_TYPES.get(self._sensor_type)[3])
self._kind = SENSOR_TYPES.get(self._sensor_type)[4]
self._name = "{0} {1}".format(
self._data.name, SENSOR_TYPES.get(self._sensor_type)[0])
self._state = None
self._tz = str(hass.config.time_zone)
self._unique_id = '{}-{}'.format(self._data.id, self._sensor_type)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
    def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
attrs['device_id'] = self._data.id
        attrs['firmware'] = self._data.firmware
attrs['kind'] = self._data.kind
attrs['timezone'] = self._data.timezone
attrs['type'] = self._data.family
attrs['wifi_name'] = self._data.wifi_name
if self._extra and self._sensor_type.startswith('last_'):
attrs['created_at'] = self._extra['created_at']
attrs['answered'] = self._extra['answered']
attrs['recording_status'] = self._extra['recording']['status']
attrs['category'] = self._extra['kind']
return attrs
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._sensor_type == 'battery' and self._state is not None:
return icon_for_battery_level(battery_level=int(self._state),
charging=False)
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES.get(self._sensor_type)[2]
def update(self):
"""Get the latest data and updates the state."""
_LOGGER.debug("Pulling data from %s sensor", self._name)
self._data.update()
if self._sensor_type == 'volume':
self._state = self._data.volume
if self._sensor_type == 'battery':
self._state = self._data.battery_life
if self._sensor_type.startswith('last_'):
history = self._data.history(limit=5,
timezone=self._tz,
kind=self._kind,
enforce_limit=True)
if history:
self._extra = history[0]
created_at = self._extra['created_at']
self._state = '{0:0>2}:{1:0>2}'.format(
created_at.hour, created_at.minute)
if self._sensor_type == 'wifi_signal_category':
self._state = self._data.wifi_signal_category
if self._sensor_type == 'wifi_signal_strength':
self._state = self._data.wifi_signal_strength
|
ctu-osgeorel/gdal-vfr
|
vfr2pg.py
|
Python
|
mit
| 5,000
| 0.0034
|
#!/usr/bin/env python3
###############################################################################
#
# VFR importer based on GDAL library
#
# Author: Martin Landa <landa.martin gmail.com>
#
# Licence: MIT/X
#
###############################################################################
"""
Imports VFR data to PostGIS database
Requires GDAL library version 1.11 or later.
"""
import sys
import atexit
import argparse
from vfr4ogr import VfrPg
from vfr4ogr.parse import parse_cmd
from vfr4ogr.logger import check_log, VfrLogger
from vfr4ogr.exception import VfrError, VfrErrorCmd
def parse_args():
parser = argparse.ArgumentParser(prog="vfr2pg",
description="Imports VFR data to PostGIS database. "
"Requires GDAL library version 1.11 or later.")
parser.add_argument("-e", "--extended",
action='store_true',
help="Extended layer list statistics")
parser.add_argument("-d", "--download",
action='store_true',
help="Download VFR data to the currect directory (--type required) and exit")
parser.add_argument("-s", "--fileschema",
action='store_true',
help="Create new schema for each VFR file")
parser.add_argument("-g", "--nogeomskip",
action='store_true',
help="Skip features without geometry")
parser.add_argument("-l", "--list",
                        action='store_true',
help="List existing layers in output database and exit")
parser.add_argument("--file",
help="Path to xml.gz|zip or URL list file")
parser.add_argument("--date",
help="Date in for
|
mat 'YYYYMMDD'")
parser.add_argument("--type",
help="Type of request in format XY_ABCD, eg. 'ST_UKSH' or 'OB_000000_ABCD'")
parser.add_argument("--layer",
help="Import only selected layers separated by comma (if not given all layers are processed)")
parser.add_argument("--geom",
help="Preferred geometry 'OriginalniHranice' or 'GeneralizovaneHranice' (if not found or not given than first geometry is used)")
parser.add_argument("--dbname",
help="Output PostGIS database")
parser.add_argument("--schema",
help="Schema name (default: public)")
parser.add_argument("--user",
help="User name")
parser.add_argument("--passwd",
help="Password")
parser.add_argument("--host",
help="Host name")
parser.add_argument("--port",
help="Port")
parser.add_argument("-o", "--overwrite",
action='store_true',
help="Overwrite existing PostGIS tables")
parser.add_argument("-a", "--append",
action='store_true',
help="Append to existing PostGIS tables")
return parser.parse_args(), parser.print_help
def main():
# parse cmdline arguments
options, usage = parse_args()
options.format = 'PostgreSQL'
try:
file_list = parse_cmd(options)
except VfrErrorCmd as e:
usage()
sys.exit('ERROR: {}'.format(e))
# build datasource name
odsn = None
if options.dbname:
odsn = "PG:dbname=%s" % options.dbname
if options.user:
odsn += " user=%s" % options.user
if options.passwd:
odsn += " password=%s" % options.passwd
if options.host:
odsn += " host=%s" % options.host
if options.port:
odsn += " port=%s" % options.port
# create convertor
try:
pg = VfrPg(schema=options.schema, schema_per_file=options.fileschema,
dsn=odsn, geom_name=options.geom, layers=options.layer,
nogeomskip=options.nogeomskip, overwrite=options.overwrite)
except VfrError as e:
sys.exit('ERROR: {}'.format(e))
# write log process header
pg.cmd_log(sys.argv)
if options.list:
# list output database and exit
pg.print_summary()
return 0
# read file list and download VFR files if needed
try:
pg.download(file_list, options.date)
except VfrError as e:
VfrLogger.error(str(e))
if options.download:
# download only requested, exiting
return 0
# import input VFR files to PostGIS
ipass = pg.run(options.append, options.extended)
# create indices for output tables
pg.create_indices()
# print final summary
if (ipass > 1 and options.fileschema is False) \
or options.append:
pg.print_summary()
return 0
if __name__ == "__main__":
atexit.register(check_log)
sys.exit(main())
|
mfitzp/padua
|
padua/utils.py
|
Python
|
bsd-2-clause
| 9,476
| 0.004538
|
import numpy as np
import scipy as sp
import scipy.interpolate
import requests
from io import StringIO
def qvalues(pv, m = None, verbose = False, lowmem = False, pi0 = None):
"""
Copyright (c) 2012, Nicolo Fusi, University of Sheffield
All rights reserved.
Estimates q-values from p-values
Args
=====
m: number of tests. If not specified m = pv.size
verbose: print verbose messages? (default False)
lowmem: use memory-efficient in-place algorithm
pi0: if None, it's estimated as suggested in Storey and Tibshirani, 2003.
For most GWAS this is not necessary, since pi0 is extremely likely to be
1
:param pv:
:param m:
:param verbose:
:param lowmem:
:param pi0:
:return:
"""
assert(pv.min() >= 0 and pv.max() <= 1), "p-values should be between 0 and 1"
original_shape = pv.shape
    pv = pv.ravel()  # flatten the array (ravel returns a view where possible, cheaper than flatten())
if m == None:
m = float(len(pv))
else:
# the user has supplied an m
m *= 1.0
# if the number of hypotheses is small, just set pi0 to 1
if len(pv) < 100 and pi0 == None:
pi0 = 1.0
elif pi0 != None:
pi0 = pi0
else:
# evaluate pi0 for different lambdas
pi0 = []
lam = sp.arange(0, 0.90, 0.01)
counts = sp.array([(pv > i).sum() for i in sp.arange(0, 0.9, 0.01)])
for l in range(len(lam)):
pi0.append(counts[l]/(m*(1-lam[l])))
pi0 = sp.array(pi0)
# fit natural cubic spline
tck = sp.interpolate.splrep(lam, pi0, k = 3)
pi0 = sp.interpolate.splev(lam[-1], tck)
if pi0 > 1:
if verbose:
print("got pi0 > 1 (%.3f) while estimating qvalues, setting it to 1" % pi0)
pi0 = 1.0
assert(pi0 >= 0 and pi0 <= 1), "pi0 is not between 0 and 1: %f" % pi0
if lowmem:
# low memory version, only uses 1 pv and 1 qv matrices
qv = sp.zeros((len(pv),))
last_pv = pv.argmax()
qv[last_pv] = (pi0*pv[last_pv]*m)/float(m)
pv[last_pv] = -sp.inf
prev_qv = last_pv
for i in range(int(len(pv))-2, -1, -1):
cur_max = pv.argmax()
qv_i = (pi0*m*pv[cur_max]/float(i+1))
pv[cur_max] = -sp.inf
qv_i1 = prev_qv
qv[cur_max] = min(qv_i, qv_i1)
prev_qv = qv[cur_max]
else:
p_ordered = sp.argsort(pv)
pv = pv[p_ordered]
qv = pi0 * m/len(pv) * pv
qv[-1] = min(qv[-1],1.0)
for i in range(len(pv)-2, -1, -1):
qv[i] = min(pi0*m*pv[i]/(i+1.0), qv[i+1])
# reorder qvalues
qv_temp = qv.copy()
qv = sp.zeros_like(qv)
qv[p_ordered] = qv_temp
# reshape qvalues
qv = qv.reshape(original_shape)
return qv
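# A minimal usage sketch (hypothetical values): given an array of p-values,
# qvalues() returns an array of q-values of the same shape.
#
#   pvals = np.random.uniform(size=1000)   # p-values from some set of tests
#   qvals = qvalues(pvals)
#
# With fewer than 100 p-values, or an explicit pi0, the spline estimate of
# pi0 is skipped (see above).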
def get_protein_id(s):
"""
Return a shortened string, split on spaces, underlines and semicolons.
Extract the first, highest-ranked protein ID from a string containing
protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
Long names (containing species information) are eliminated (split on ' ') and
isoforms are removed (split on '_').
:param s: protein IDs in MaxQuant format
:type s: str or unicode
:return: string
"""
return str(s).split(';')[0].split(' ')[0].split('_')[0]
def get_protein_ids(s):
"""
Return a list of shortform protein IDs.
Extract all protein IDs from a string containing
protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
Long names (containing species information) are eliminated (split on ' ') and
isoforms are removed (split on '_').
:param s: protein IDs in MaxQuant format
:type s: str or unicode
:return: list of string ids
"""
return [p.split(' ')[0].split('_')[0] for p in s.split(';') ]
def get_protein_id_list(df, level=0):
"""
Return a complete list of shortform IDs from a DataFrame
Extract all protein IDs from a dataframe from multiple rows containing
protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
Long names (containing species information) are eliminated (split on ' ') and
isoforms are removed (split on '_').
:param df: DataFrame
:type df: pandas.DataFrame
:param level: Level of DataFrame index to extract IDs from
:type level: int or str
:return: list of string ids
"""
protein_list = []
for s in df.index.get_level_values(level):
        protein_list.extend( get_protein_ids(s) )
return list(set(protein_list))
def get_shortstr(s):
"""
Return the first part of a string before a semicolon.
Extract the first, highest-ranked protein ID from a string containing
    protein IDs in MaxQuant output format: e.g. P07830;P63267;Q54A44;P63268
:param s: protein IDs in MaxQuant format
:type s: str or unicode
:return: string
"""
return str(s).split(';')[0]
def get_index_list(l, ms):
"""
:param l:
:param ms:
:return:
"""
if type(ms) != list and type(ms) != tuple:
ms = [ms]
return [l.index(s) for s in ms if s in l]
def format_label(sl, fmt=None):
"""
Combine a list of strings to a single str, joined by sep.
Passes through single strings.
:param sl:
:return:
"""
if isinstance(sl, str):
# Already is a string.
return sl
if fmt:
return fmt.format(*sl)
return ' '.join(str(s) for s in sl)
def build_combined_label(sl, idxs, sep=' ', label_format=None):
"""
Generate a combined label from a list of indexes
into sl, by joining them with `sep` (str).
:param sl: Strings to combine
:type sl: dict of str
:param idxs: Indexes into sl
:type idxs: list of sl keys
:param sep:
:return: `str` of combined label
"""
if label_format:
return label_format % tuple([get_shortstr(str(sl[n])) for n in idxs])
else:
return sep.join([get_shortstr(str(sl[n])) for n in idxs])
def hierarchical_match(d, k, default=None):
"""
Match a key against a dict, simplifying element at a time
:param df: DataFrame
:type df: pandas.DataFrame
:param level: Level of DataFrame index to extract IDs from
:type level: int or str
:return: hiearchically matched value or default
"""
if d is None:
return default
if type(k) != list and type(k) != tuple:
k = [k]
for n, _ in enumerate(k):
key = tuple(k[0:len(k)-n])
if len(key) == 1:
key = key[0]
try:
d[key]
except:
pass
else:
return d[key]
return default
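# Illustration of the lookup order (hypothetical dict):
#
#   d = {('a', 'b'): 1, 'a': 2}
#   hierarchical_match(d, ('a', 'b', 'c'))   # tries ('a','b','c'), then ('a','b') -> 1
#   hierarchical_match(d, 'x', default=0)    # no match at any level -> 0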
def chunks(seq, num):
"""
Separate `seq` (`np.array`) into `num` series of as-near-as possible equal
length values.
:param seq: Sequence to split
:type seq: np.array
:param num: Number of parts to split sequence into
:type num: int
:return: np.array of split parts
"""
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return np.array(out)
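# Example: chunks(np.arange(6), 3) returns three parts of length 2; when
# len(seq) is not divisible by num, the integer slice boundaries make the
# part lengths differ by roughly one element.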
def calculate_s0_curve(s0, minpval, maxpval, minratio, maxratio, curve_interval=0.1):
"""
Calculate s0 curve for volcano plot.
Taking an min and max p value, and a min and max ratio, calculate an smooth
curve starting from parameter `s0` in each direction.
The `curve_interval` parameter defines the smoothness of the resulting curve.
:param s0: `float` offset of curve from interset
:param minpval: `float` minimum p value
:param maxpval: `float` maximum p value
:param minratio: `float` minimum ratio
:param maxratio: `float` maximum ratio
:param curve_interval: `float` stepsize (smoothness) of curve generator
:return: x, y, fn x,y points of curve, and fn generator
"""
mminpval = -np.log10(minpval)
mmaxpval = -np.log10(maxpval)
maxpval_adjust = mmaxpval - mminpval
ax0 = (s0 + maxpval_
|
stscieisenhamer/glue
|
glue/core/qt/message_widget.py
|
Python
|
bsd-3-clause
| 1,541
| 0
|
from __future__ import absolute_import, division, print_function
import os
from time import ctime
from qtpy import QtWidgets
from glue import core
from glue.utils.qt import load_ui
class MessageWidget(QtWidgets.QWidget, core.hub.HubListener):
"
|
"" This simple class displays all messages broadcast
by a hub. It is mainly intended for debugging """
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.ui = load_ui('message_widget.ui', self,
directory=os.path.dirname(__file__))
self.ui.messageTable.setColumnCount(3)
labels = ['Time', 'Message', 'Sender']
self.ui.messageTable.setHorizontalHeaderLabels(labels)
def register_to_hub(self, hub):
# catch all messages
hub.subscribe(self, core.message.Message,
handler=self.process_message,
filter=lambda x: True)
def process_message(self, message):
        row = 0  # new messages are inserted at the top, so the target row is always 0
self.ui.messageTable.insertRow(0)
tm = QtWidgets.QTableWidgetItem(ctime().split()[3])
typ = str(type(message)).split("'")[-2].split('.')[-1]
mtyp = QtWidgets.QTableWidgetItem(typ)
typ = str(type(message.sender)).split("'")[-2].split('.')[-1]
sender = QtWidgets.QTableWidgetItem(typ)
self.ui.messageTable.setItem(row, 0, tm)
self.ui.messageTable.setItem(row, 1, mtyp)
self.ui.messageTable.setItem(row, 2, sender)
self.ui.messageTable.resizeColumnsToContents()
|
annegabrielle/secure_adhoc_network_ns-3
|
ns3_source_code/ns-3.10/bindings/python/apidefs/gcc-LP64/ns3_module_nix_vector_routing.py
|
Python
|
gpl-2.0
| 11,439
| 0.012763
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## ipv4-nix-vector-routing.h: ns3::Ipv4NixVectorRouting [class]
module.add_class('Ipv4NixVectorRouting', parent=root_module['ns3::Ipv4RoutingProtocol'])
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >', 'ns3::NixMap_t')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >*', 'ns3::NixMap_t*')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::NixVector >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::NixVector > > > >&', 'ns3::NixMap_t&')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >', 'ns3::Ipv4RouteMap_t')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >*', 'ns3::Ipv4RouteMap_t*')
typehandlers.add_type_alias('std::map< ns3::Ipv4Address, ns3::Ptr< ns3::Ipv4Route >, std::less< ns3::Ipv4Address >, std::allocator< std::pair< ns3::Ipv4Address const, ns3::Ptr< ns3::Ipv4Route > > > >&', 'ns3::Ipv4RouteMap_t&')
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace dsdv
nested_module = module.add_cpp_namespace('dsdv')
register_types_ns3_dsdv(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_dsdv(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3Ipv4NixVectorRouting_methods(root_module, root_module['ns3::Ipv4NixVectorRouting'])
return
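# A driver sketch for the generated helpers above (commented out, and an
# assumption about how the wider binding generator calls them: register_types()
# expects core ns3 types such as ns3::Ipv4RoutingProtocol to already be
# registered on the root module):
# import sys
# root = Module('ns3', cpp_namespace='::ns3')
# register_types(root)
# register_methods(root)
# root.generate(FileCodeSink(sys.stdout))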
def register_Ns3Ipv4NixVectorRouting_methods(root_module, cls):
## ipv4-nix-vector-routing.h: ns3::Ipv4NixVectorRouting::Ipv4NixVectorRouting(ns3::Ipv4NixVectorRouting const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4NixVectorRouting const &', 'arg0')])
## ipv4-nix-vector-routing.h: ns3::Ipv4NixVectorRouting::Ipv4NixVectorRouting() [constructor]
cls.add_constructor([])
    ## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::FlushGlobalNixRoutingCache() [member function]
cls.add_method('FlushGlobalNixRoutingCache',
'void',
[])
    ## ipv4-nix-vector-routing.h: static ns3::TypeId ns3::Ipv4NixVectorRouting::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: void ns3::Ipv4NixVectorRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
is_const=True, visibility='private', is_virtual=True)
## ipv4-nix-vector-routing.h: bool ns3::Ipv4NixVectorRouting::RouteInput(ns3::Ptr<ns3::Packet const> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void, ns3::Ptr<ns3::Ipv4Route>, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::Ipv4Header const&, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3:
|
oscarlab/betrfs
|
benchmarks/aging/mailserver/mailserver-aging.py
|
Python
|
gpl-2.0
| 2,672
| 0.030689
|
#!/usr/bin/python
import imaplib
import sys
import random
import os
import threading
import time
import types
import subprocess
SERVER = "localhost"
USER = ["ftfstest1", "ftfstest2", "ftfstest3", "ftfstest4", "ftfstest5", "ftfstest6", "ftfstest7", "ftfstest8", "ftfstest9", "ftfstest10", "ftfstest11", "ftfstest12", "ftfstest13", "ftfstest14", "ftfstest15", "ftfstest16"]
PASS = ["oscarlab", "oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab","oscarlab"]
n_user = 2
n_box = 80
boxsize = 1000
max_msg_len = 32768
run_time = 1800
n_top = 8000
def worker_thread(i, n_op_thread, running) :
m = imaplib.IMAP4_SSL(SERVER)
m.login(USER[i], PASS[i])
while not running.isSet() :
pass
n_ops = [0] * 3
# while running.isSet() :
    for _ in range(n_top) :  # '_' avoids clobbering the thread index 'i' used in the summary print below
boxnum = random.randint(1, n_box) - 1
box = "box%d" % boxnum
x = m.select(box)
rand_op = random.randint(1, 2) - 1
if rand_op == 0 :
msg_len = random.randint(1, max_msg_len)
msg = os.urandom(msg_len)
m.APPEND(box, None, None, msg)
else :
            typ, msg_ids = m.search(None, 'ALL')
            msgs = msg_ids[0].split()
            if not msgs :  # skip empty boxes: randint(1, 0) would raise ValueError
                continue
            msg_num = random.randint(1, len(msgs)) - 1
msg = msgs[msg_num]
# if rand_op == 1 :
m.store(msg, "+FLAGS", "(\\Deleted)")
m.expunge()
# else :
# typ, data = m.fetch(msg, "(RFC822 FLAGS)")
# flagged = 0
# if type(data[0]) is types.NoneType :
# continue
# flagged = 0
# for flag in imaplib.ParseFlags(data[0][0]) :
# if (flag == "\Flagged") :
# flagged = 1
# if flagged :
# m.store(msg, "-FLAGS", "(\\FLAGGED)")
# else :
# m.store(msg, "+FLAGS", "(\\FLAGGED)")
n_ops[rand_op] = n_ops[rand_op] + 1
subprocess.call('echo "flush" > /proc/toku_flusher', shell=True)
m.logout()
print "Thread %d: append %d delete %d flag change %d" % (i, n_ops[0], n_ops[1], n_ops[2])
n_op_thread.append(n_ops[0] + n_ops[1] + n_ops[2])
print "MAILSERVER AGEING"
f=open('mailservertime.out','a')
t = []
running = threading.Event()
n_op_thread = []
for i in range(n_user) :
tmp_t = threading.Thread(target = worker_thread, args = (i, n_op_thread, running,))
tmp_t.start()
t.append(tmp_t)
time.sleep(2)
running.set()
t1 = time.time()
#time.sleep(run_time)
#running.clear()
for i in range(n_user):
t[i].join()
t2 = time.time()
n_op_total = 0
for i in range(n_user) :
n_op_total = n_op_total + n_op_thread[i]
print "This experiment took %f seconds" % (t2 - t1)
print "%d ops are executed (%f op/s)" % (n_op_total, n_op_total / (t2 - t1))
f.write("Time\t")
f.write(str(t2 - t1) + '\t')
f.write("Nops\t")
f.write(str(n_op_total) + '\n')
sys.exit(0)
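# Note: imaplib maps uppercase attribute names to its lowercase command
# methods via IMAP4.__getattr__, so m.APPEND(...) above is equivalent to
# m.append(...). A standalone sketch of one append/delete cycle (hypothetical
# host and credentials, commented out because the script exits above):
# m = imaplib.IMAP4_SSL("localhost")
# m.login("ftfstest1", "oscarlab")
# m.select("box0")
# m.append("box0", None, None, os.urandom(128))
# typ, msg_ids = m.search(None, "ALL")
# if msg_ids[0]:
#     m.store(msg_ids[0].split()[0], "+FLAGS", "(\\Deleted)")
#     m.expunge()
# m.logout()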
|
vdmann/cse-360-image-hosting-website
|
src/mvp_landing/urls.py
|
Python
|
mit
| 1,288
| 0.003882
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
# not sure about line 7
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^dropzone-drag-drop/$', include('dragdrop.urls', namespace="dragdrop", app_name="dragdrop")),
url(r'^index/$', 'dragdrop.views.GetUserImages'),
    url(r'^$', 'signups.views.home', name='home'),
url(r'^register/$', 'drinker.views.DrinkerRegistration'),
url(r'^login/$', 'drinker.views.LoginRequest'),
url(r'^logout/$', 'drinker.views.LogOutRequest'),
url(r'^index/filter/$', 'filter.views.changeBright'),
    # Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# not sure if I need an actual url wrapper in this code.
# url(r'^admin/varnish/', include('varnishapp.urls')),
)
if settings.DEBUG:
    # serve STATIC_ROOT at STATIC_URL (and MEDIA_ROOT at MEDIA_URL) in development
urlpatterns += static(settings.STATIC_URL,
document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
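# Note: django.conf.urls.static.static() returns an empty pattern list when
# settings.DEBUG is False, so the two additions above are no-ops in
# production; there, the front-end web server should serve STATIC_ROOT and
# MEDIA_ROOT directly.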
|
dongjinleekr/wpdb
|
wpdb.py
|
Python
|
apache-2.0
| 16,291
| 0.008348
|
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import DateTime, Integer, String, Text
from sqlalchemy import ForeignKeyConstraint, UniqueConstraint
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import backref, dynamic_loader, mapper, relation
from sqlalchemy.orm.collections import column_mapped_collection
class Term:
def __init__(self, name, slug, term_group=0):
self.name = name
self.slug = slug
self.term_group = term_group
def __repr__(self):
return '<Term(%r, %r, %r)>' % (self.name, self.slug, self.term_group)
class Taxonomy(object):
def __init__(self, term, description):
self.term = term
self.description = description
class PostTag(Taxonomy):
def __repr__(self):
return '<PostTag(%r, %r)>' % (self.term, self.description)
class Category(Taxonomy):
def __repr__(self):
return '<Category(%r, %r)>' % (self.term, self.description)
class LinkCategory(Taxonomy):
def __repr__(self):
return '<LinkCategory(%r, %r)>' % (self.term, self.description)
class PostMeta(object):
def __init__(self, meta_key, meta_value):
self.meta_key = meta_key
self.meta_value = meta_value
def __repr__(self):
return '<PostMeta(%r, %r)>' % (self.meta_key, self.meta_value)
class Post(object):
def __init__(self, post_title, post_type='post'):
self.post_title = post_title
self.post_type = post_type
meta = association_proxy('_metadict', 'meta_value', creator=PostMeta)
def __repr__(self):
return '<Post(%r, %r)>' % (self.post_title, self.post_type)
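# The 'meta' association_proxy above lets postmeta rows read and write like a
# dict, assuming init() maps '_metadict' with column_mapped_collection keyed
# on meta_key (column_mapped_collection is imported for that purpose), e.g.:
#   post = Post('Hello world')
#   post.meta['mood'] = 'cheerful'   # creates a PostMeta('mood', 'cheerful') row
#   post.meta['mood']                # -> 'cheerful'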
class Link(object):
def __init__(self, link_url, link_name):
self.link_url = link_url
self.link_name = link_name
def __repr__(self):
return '<Link(%r, %r)>' % (self.link_url, self.link_name)
class CommentMeta(object):
def __init__(self, meta_key, meta_value):
self.meta_key = meta_key
self.meta_value = meta_value
def __repr__(self):
return '<CommentMeta(%r, %r)>' % (self.meta_key, self.meta_value)
class Comment(object):
def __init__(self, comment_author, comment_content):
self.comment_author = comment_author
self.comment_content = comment_content
meta = association_proxy('_metadict', 'meta_value', creator=CommentMeta)
def __repr__(self):
return '<Comment(%r, %r)>' % (self.comment_author, self.comment_content)
class UserMeta(object):
def __init__(self, meta_key, meta_value):
self.meta_key = meta_key
self.meta_value = meta_value
def __repr__(self):
return '<UserMeta(%r, %r)>' % (self.meta_key, self.meta_value)
class User(object):
def __init__(self, user_login):
self.user_login = user_login
meta = association_proxy('_metadict', 'meta_value', creator=UserMeta)
def __repr__(self):
return '<User(%r)>' % self.user_login
class Option(object):
def __init__(self, option_name, option_value):
self.option_name = option_name
self.option_value = option_value
def __repr__(self):
return '<Option(%r, %r)>' % (self.option_name, self.option_value)
def init(prefix='wp'):
metadata = MetaData()
# tables
terms = Table('%s_terms' % prefix, metadata,
Column('term_id', Integer(), primary_key=True, nullable=False),
Column('name', String(length=55), primary_key=False, nullable=False),
Column('slug', String(length=200), primary_key=False, nullable=False),
Column('term_group', Integer(), primary_key=False, nullable=False),
UniqueConstraint('slug'),
)
term_taxonomy = Table('%s_term_taxonomy' % prefix, metadata,
Column('term_taxonomy_id', Integer(), primary_key=True, nullable=False),
Column('term_id', Integer(), primary_key=False, nullable=False),
Column('taxonomy', String(length=32), primary_key=False, nullable=False),
Column('description', Text(length=None), primary_key=False, nullable=False),
Column('parent', Integer(), primary_key=False, nullable=False),
Column('count', Integer(), primary_key=False, nullable=False),
UniqueConstraint('term_id', 'taxonomy'),
ForeignKeyConstraint(['term_id'], ['%s_terms.term_id' % prefix]),
        ForeignKeyConstraint(['parent'], ['%s_term_taxonomy.term_taxonomy_id' % prefix]),
)
term_relationships = Table('%s_term_relationships' % prefix, metadata,
Column('object_id', Integer(), primary_key=True, nullable=False),
Column('term_taxonomy_id', Integer(), primary_key=True, nullable=False),
ForeignKeyConstraint(['term_taxonomy_id'], ['%s_term_taxonomy.term_taxonomy_id' % prefix]),
)
postmeta = Table('%s_postmeta' % prefix, metadata,
Column('meta_id', Integer(), primary_key=True, nullable=False),
Column('post_id', Integer(), primary_key=False, nullable=False),
Column('meta_key', String(length=255), primary_key=False),
Column('meta_value', Text(length=None), primary_key=False),
ForeignKeyConstraint(['post_id'], ['%s_posts.ID' % prefix]),
)
posts = Table('%s_posts' % prefix, metadata,
Column('ID', Integer(), primary_key=True, nullable=False),
Column('post_author', Integer(), primary_key=False, nullable=False),
Column('post_date', DateTime(timezone=False), primary_key=False, nullable=False),
Column('post_date_gmt', DateTime(timezone=False), primary_key=False, nullable=False),
Column('post_content', Text(length=None), primary_key=False, nullable=False),
Column('post_title', Text(length=None), primary_key=False, nullable=False),
Column('post_excerpt', Text(length=None), primary_key=False, nullable=False),
Column('post_status', String(length=10), primary_key=False, nullable=False),
Column('comment_status', String(length=15), primary_key=False, nullable=False),
Column('ping_status', String(length=6), primary_key=False, nullable=False),
Column('post_password', String(length=20), primary_key=False, nullable=False),
Column('post_name', String(length=200), primary_key=False, nullable=False),
Column('to_ping', Text(length=None), primary_key=False, nullable=False),
Column('pinged', Text(length=None), primary_key=False, nullable=False),
Column('post_modified', DateTime(timezone=False), primary_key=False, nullable=False),
Column('post_modified_gmt', DateTime(timezone=False), primary_key=False, nullable=False),
Column('post_content_filtered', Text(length=None), primary_key=False, nullable=False),
Column('post_parent', Integer(), primary_key=False, nullable=False),
Column('guid', String(length=255), primary_key=False, nullable=False),
Column('menu_order', Integer(), primary_key=False, nullable=False),
Column('post_type', String(length=20), primary_key=False, nullable=False),
Column('post_mime_type', String(length=100), primary_key=False, nullable=False),
Column('comment_count', Integer(), primary_key=False, nullable=False),
ForeignKeyConstraint(['post_author'], ['%s_users.ID' % prefix]),
ForeignKeyConstraint(['post_parent'], ['%s_posts.ID' % prefix]),
)
links = Table('%s_links' % prefix, metadata,
Column('link_id', Integer(), primary_key=True, nullable=False),
Column('link_url', String(length=255), primary_key=False, nullable=False),
Column('link_name', String(length=255), primary_key=False, nullable=False),
Column('link_image', String(length=255), primary_key=False, nullable=False),
Column('link_target', String(length=25), primary_key=False, nullable=False),
Column('link_category', Integer(), primary_key=False, nullable=False),
Column('link_description', String(length=255), primary_key=False, nullable=False),
Column('link_visible', String(length=1), primary_key=False, nullable=False),
Column('link_owner', Integer(), primary_key=False, nullable=False),
Column('link_rating', Integer(), primary_key=False, nullable=False),
Column('link_updated', DateTime(timezone=False), primary_key=False, nullable=False),
Column('link_rel', String(length=255), primary_key=False, nullable=False),
Column('link_notes', Text(length=None), primary_key=False, nullable=False),
Column('link_rss', String(length=255), primary_key=False, nullable=False),
ForeignKeyConstraint(['link_owner'], ['%s_users.ID' %
|
natict/roomservice
|
roomservice/mysql.py
|
Python
|
mit
| 1,306
| 0
|
import pymysql
from flask_restful import Resource
from flask import abort
ALLOWED_SHOW = ('processlist', 'databases', 'plugins', 'privileges')
class Mysql(Resource):
def __init__(self):
self.connection = pymysql.connect(user='root')
self.cursor = self.connection.cursor()
def _execute(self, sql):
self.cursor.execute(sql)
desc_id = tuple(x[0] for x in self.cursor.description)
query_result = self.cursor.fetchall()
results = [dict(zip(desc_id, item)) for item in query_result]
return results
def get(self, cmd):
if cmd in ALLOWED_SHOW:
return self._execute('show ' + cmd)
else:
abort(404)
class MysqlDatabase(Mysql):
def get(self, dbname):
try:
self.connection.select_db(dbname)
except pymysql.InternalError as e:
abort(400, e.args)
return self._execute('show tables')
def post(self, dbname):
try:
self.cursor.execute('create database ' + dbname)
except pymysql.ProgrammingError as e:
abort(400, e.args)
def delete(self, dbname):
try:
self.cursor.execute('drop database if exists ' + dbname)
except pymysql.ProgrammingError as e:
abort(400, e.args)
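# A wiring sketch (hypothetical routes; assumes a local MySQL server that
# accepts root without a password, as the connect() call above does). Note
# that post()/delete() concatenate dbname into raw SQL; identifiers cannot be
# bound as parameters in MySQL, so dbname should be validated before use.
if __name__ == '__main__':
    from flask import Flask
    from flask_restful import Api

    app = Flask(__name__)
    api = Api(app)
    api.add_resource(Mysql, '/mysql/<string:cmd>')
    api.add_resource(MysqlDatabase, '/mysql/db/<string:dbname>')
    app.run()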
|
phase/ApplePi
|
fastmc/auth.py
|
Python
|
mit
| 7,818
| 0.004861
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Florian Wesch <fw@dividuum.de>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import hashlib
from uuid import UUID
from simplejson import dumps as json_dumps
print "If you experience a crash due to Crypto, please do the following!"
print "goto your lib/site-packages and rename \'crypto\' to \'Crypto\'"
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Cipher import AES
import requests
log = logging.getLogger(__name__)
# Encryption magic based on sadimusi/mc3p encryption implementation
# https://github.com/sadimusi/mc3p/blob/master/mc3p/encryption.py
def _pkcs1_unpad(bytes):
pos = bytes.find('\x00')
if pos > 0:
return bytes[pos+1:]
def _pkcs1_pad(bytes):
assert len(bytes) < 117
padding = ""
while len(padding) < 125-len(bytes):
byte = Random.get_random_bytes(1)
if byte != '\x00':
padding += byte
return '\x00\x02%s\x00%s' % (padding, bytes)
def generate_key_pair():
"""Generates a 1024 bit RSA key pair"""
return RSA.generate(1024)
def encode_public_key(key):
"""Encodes a public RSA key in ASN.1 format as defined by x.509"""
return key.publickey().exportKey(format="DER")
def generate_random_bytes(length):
return Random.get_random_bytes(length)
def generate_challenge_token():
"""Generates 4 random bytes"""
return generate_random_bytes(4)
def generate_server_id():
"""Generates 20 random hex characters"""
return "".join("%02x" % ord(c) for c in generate_random_bytes(10))
def decrypt_with_private_key(data, private_key):
"""Decrypts the PKCS#1 padded shared secret using the private RSA key"""
return _pkcs1_unpad(private_key.decrypt(data))
def generated_cipher(shared_secret):
"""Creates a AES128 stream cipher using cfb8 mode"""
return AES.new(shared_secret, AES.MODE_CFB, shared_secret)
def decode_public_key(bytes):
"""Decodes a public RSA key in ASN.1 format as defined by x.509"""
return RSA.importKey(bytes)
def generate_shared_secret():
"""Generates a 128 bit secret key to be used in symmetric encryption"""
return generate_random_bytes(16)
def encrypt_with_public_key(data, public_key):
"""Encrypts the PKCS#1 padded shared secret using the public RSA key"""
return public_key.encrypt(_pkcs1_pad(data), 0)[0]
class SessionException(Exception):
pass
class Session(object):
YGGDRASIL_BASE = "https://authserver.mojang.com"
@classmethod
def make_client_token(cls):
return "".join("%02x" % ord(c) for c in generate_random_bytes(16))
@classmethod
def from_credentials(cls, username, password, client_token=None):
if client_token is None:
client_token = cls.make_client_token()
info = cls.do_request("/authenticate", {
'agent': {
'name': 'Minecraft',
'version': 1,
},
'username': username,
'password': password,
'clientToken': client_token,
})
return cls(
info['accessToken'],
info['selectedProfile']['name'],
info['selectedProfile']['id']
)
@classmethod
def from_access_token(cls, access_token):
info = cls.do_request("/refresh", {
'accessToken': access_token
})
return cls(
info['accessToken'],
info['selectedProfile']['name'],
info['selectedProfile']['id']
)
@classmethod
def from_authinfo(cls, access_token, player_ign, player_uuid):
return cls(
access_token,
player_ign,
player_uuid,
)
def __init__(self, access_token, player_ign, uuid):
self._access_token = access_token
self._player_ign = player_ign
self._uuid = UUID(uuid)
def refresh(self):
return Session(self._access_token)
@property
def player_ign(self):
return self._player_ign
@property
def uuid(self):
return str(self._uuid)
@property
def uuid_hex(self):
return self._uuid.hex
@property
def access_token(self):
return self._access_token
@property
def session_id(self):
return 'token:%s:%s' % (self._access_token, self.uuid_hex)
def __str__(self):
return "<Session: %s (%s) (accessToken: %s)>" % (
self._player_ign, self._uuid, self._access_token)
def validate(self):
r = requests.post(self.YGGDRASIL_BASE + "/validate", data=json_dumps({
'accessToken': self._access_token
}))
return r.status_code in (200, 204)
def invalidate(self):
r = requests.post(self.YGGDRASIL_BASE + "/invalidate", data=json_dumps({
'accessToken': self._access_token
}))
return r.status_code in (200, 204)
@classmethod
def do_request(cls, endpoint, data):
try:
log.debug("sending %s" % (data,))
r = requests.post(cls.YGGDRASIL_BASE + endpoint, data=json_dumps(data))
if not r.ok:
try:
error = r.json()['errorMessage']
except:
error = "unknown error"
raise SessionException("%d: %s" % (r.status_code, error))
json = r.json()
log.debug("received %s" % (json,))
return json
except requests.exceptions.RequestException, err:
raise SessionException(err.message)
def make_server_hash(server_id, shared_secret, key):
digest = hashlib.sha1()
digest.update(server_id)
digest.update(shared_secret)
digest.update(encode_public_key(key))
d = long(digest.hexdigest(), 16)
if d >> 39 * 4 & 0x8:
return "-%x" % ((-d) & (2 ** (40 * 4) - 1))
return "%x" % d
def join_server(session, server_hash):
r = requests.post('https://sessionserver.mojang.com/session/minecraft/join', data=json_dumps({
'accessToken': session.access_token,
'selectedProfile': session.uuid_hex,
'serverId': server_hash,
}), headers = {
'Content-Type': 'application/json', #; charset=utf-8',
'User-Agent': None,
})
return r.status_code in (200, 204)
def check_player(player_ign, server_hash):
r = requests.get('https://sessionserver.mojang.com/session/minecraft/hasJoined?username=%s&serverId=%s' % (
player_ign, server_hash))
return None if r.status_code != 200 else r.json()
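# A usage sketch (hypothetical credentials; every call below hits Mojang's
# Yggdrasil or session servers, so it fails without a real account):
if __name__ == '__main__':
    session = Session.from_credentials("user@example.com", "hunter2")
    key = generate_key_pair()
    secret = generate_shared_secret()
    server_hash = make_server_hash(generate_server_id(), secret, key)
    if join_server(session, server_hash):
        print "joined as %s (%s)" % (session.player_ign, session.uuid)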
|