Dataset schema (one row per sample):

| column | dtype | notes |
|---|---|---|
| repo_name | string | lengths 5–100 |
| path | string | lengths 4–231 |
| language | string | 1 class (Python) |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | lengths 0–8.16k |
| middle | string | lengths 3–512 |
| suffix | string | lengths 0–8.17k |
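Each row that follows is one sample from this fill-in-the-middle corpus: a Python source file split into prefix, middle, and suffix segments, together with the repository metadata listed above. A minimal sketch of loading such a corpus and reassembling one row into its original file, assuming it is published as a Hugging Face dataset (the dataset name below is a hypothetical placeholder):

```python
# Minimal sketch, assuming the rows are available through the `datasets`
# library; "user/python-fim-corpus" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("user/python-fim-corpus", split="train")
row = ds[0]

# The original source file is the concatenation of the three segments.
source = row["prefix"] + row["middle"] + row["suffix"]
print(row["repo_name"], row["path"], row["license"], row["score"])
print(source[:200])
```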
mupif/mupif | examples/Example10-jobMan-distrib-PBS/application10.py | Python | lgpl-3.0 | 5,189 | 0.003469

import sys
import os
import Pyro5
import logging
sys.path.extend(['..', '../..'])
import mupif as mp
import time as timemod
import uuid
import pbs_tool
log = logging.getLogger()
@Pyro5.api.expose
class Application10(mp.Model):
"""
Simple application which sums given time values times 2
"""
def __init__(self, metadata={}, **kwargs):
MD = {
'Name': 'Simple time summator',
'ID': 'N/A',
            'Description': 'Cumulates given time values times 2',
'Version_date': '12/2021',
'Physics': {
'Type': 'Other',
'Entity': 'Other'
},
'Solver': {
'Software': 'Python script',
'Language': 'Python3',
'License': 'LGPL',
'Creator': 'Stanislav',
'Version_date': '12/2021',
'Type': 'Summator',
'Documentation': 'Nowhere',
'Estim_time_step_s': 1,
'Estim_comp_time_s': 0.01,
'Estim_execution_cost_EUR': 0.01,
'Estim_personnel_cost_EUR': 0.01,
'Required_expertise': 'None',
'Accuracy': 'High',
'Sensitivity': 'High',
'Complexity': 'Low',
'Robustness': 'High'
},
'Inputs': [
{'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time', 'Name': 'Time value',
'Description': 'Time', 'Units': 's', 'Required': True, "Set_at": "timestep", "Obj_ID": '1', "ValueType": "Scalar"}
],
'Outputs': [
            {'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time', 'Name': 'Cumulated time value',
             'Description': 'Cumulative time', 'Units': 's', "ValueType": "Scalar"}
]
}
super().__init__(metadata=MD, **kwargs)
self.updateMetadata(metadata)
self.value = 0.
self.input = 0.
def initialize(self, workdir='', metadata={}, validateMetaData=True, **kwargs):
super().initialize(workdir=workdir, metadata=metadata, validateMetaData=validateMetaData, **kwargs)
def get(self, objectTypeID, time=None, objectID=""):
md = {
'Execution': {
'ID': self.getMetadata('Execution.ID'),
'Use_case_ID': self.getMetadata('Execution.Use_case_ID'),
'Task_ID': self.getMetadata('Execution.Task_ID')
}
}
if objectTypeID == mp.DataID.PID_Time:
return mp.ConstantProperty(value=self.value, propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=time, metadata=md)
def set(self, obj, objectID=""):
if obj.isInstance(mp.Property):
if obj.getPropertyID() == mp.DataID.PID_Time:
self.input = obj.inUnitsOf(mp.U.s).getValue()
def solveStep(self, tstep, stageID=0, runInBackground=False):
        # this function is designed to run the executable under a Torque/PBS or Slurm batch system and process the output when the job is finished.
rp = os.path.realpath(__file__)
dirname = os.path.dirname(rp)
        # create unique input and output file names (this is specific for each application/executable)
step_id = uuid.uuid4()
inpfile = "%s/inp_%s.txt" % (dirname, step_id)
outfile = "%s/out_%s.txt" % (dirname, step_id)
#
# create the input file
f = open(inpfile, 'w')
f.write("%f" % self.input)
f.close()
#
# submit the job
jobid = pbs_tool.submit_job(command=" -v inpfile=\"%s\",outfile=\"%s\",script=\"%s/appexec.py\",dirname=\"%s\" %s/appexec.job -o %s/log.txt -e %s/err.txt" % (inpfile, outfile, dirname, dirname, dirname, dirname, dirname))
#
# wait until the job is finished
# After its completion, the job stays in the list of jobs with 'Completed' status for a while.
# After that time it is not in the list any more, which results in 'Unknown' state.
        # With a 60-second checking period the job should still be available in the list.
pbs_tool.wait_until_job_is_done(jobid=jobid, checking_frequency=1.)
#
# process the results (this is specific for each application/executable)
if os.path.exists(outfile):
f = open(outfile, 'r')
read_value = f.readline()
f.close()
if read_value != "error":
self.value += float(read_value)
else:
raise mp.apierror.APIError("A problem occured in the solver.")
else:
print("File '%s' does not exist." % outfile)
raise mp.apierror.APIError("The output file does not exist.")
# delete the temporary input and output files
if os.path.exists(inpfile):
os.remove(inpfile)
if os.path.exists(outfile):
os.remove(outfile)
def getCriticalTimeStep(self):
return 1000.*mp.U.s
def getAssemblyTime(self, tstep):
return tstep.getTime()
def getApplicationSignature(self):
return "Application10"

hirofumi0810/tensorflow_end2end_speech_recognition | utils/progressbar.py | Python | mit | 441 | 0

#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tqdm import tqdm
def wrap_iterator(iterator, progressbar):
if progressbar:
iterator = tqdm(iterator)
return iterator
def wrap_generator(generator, progressbar, total):
if progressbar:
generator = tqdm(generator, total=total)
return generator

cosven/FeelUOwn | feeluown/gui/uimodels/my_music.py | Python | gpl-3.0 | 980 | 0

from feeluown.utils.dispatch import Signal
from feeluown.gui.widgets.my_music import MyMusicModel
class MyMusicItem(object):
    def __init__(self, text):
self.text = text
self.clicked = Signal()
class MyMusicUiManager:
"""
    .. note::

        For now we store the items in a plain list and only provide the
        add_item and clear methods. Ideally, the items in MyMusic should
        stay associated with a provider, since the provider is the context
        of MyMusic. Provider is a fairly high-level object, so we may offer
        finer-grained access methods such as get_item.
    """
def __init__(self, app):
self._app = app
self._items = []
self.model = MyMusicModel(app)
@classmethod
def create_item(cls, text):
return MyMusicItem(text)
def add_item(self, item):
self.model.add(item)
self._items.append(item)
def clear(self):
self._items.clear()
self.model.clear()

indico/indico | indico/modules/events/sessions/schemas.py | Python | mit | 937 | 0.002134

# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from marshmallow import fields
from indico.core.marshmallow import mm
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.sessions import Session
class SessionBlockSchema(mm.SQLAlchemyAutoSchema):
room_name_verbose = fields.Function(lambda obj: obj.get_room_name(full=False, verbose=True))
class Meta:
model = SessionBlock
fields = ('id', 'title', 'code', 'start_dt', 'end_dt', 'duration', 'room_name', 'room_name_verbose')
class BasicSessionSchema(mm.SQLAlchemyAutoSchema):
    blocks = fields.Nested(SessionBlockSchema, many=True)
class Meta:
model = Session
fields = ('id', 'title', 'friendly_id', 'blocks')

cherry-hyx/hjb-test | 脚本/deploy/t.py | Python | artistic-2.0 | 449 | 0

#!/usr/bin/env python
# -*-coding:utf-8-*-
# by huangjiangbo
# deploy service
# deploy.py
from ConfigParser import ConfigParser
ConfigFile = r'config.ini'  # read the configuration file
config = ConfigParser()
config.read(ConfigFile)
de_infos = config.items(r'deploy_server')  # remote deployment server info
redeploy_server_info = {}
appinfo = {}
print de_infos
for (key, value) in de_infos:
redeploy_server_info[key] = value
print redeploy_server_info

neuroelectro/neuroelectro_org | neuroelectro/migrations/0024_auto_20160604_1750.py | Python | gpl-2.0 | 423 | 0

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('neuroelectro', '0023_auto_20160604_1620'),
]
operations = [
migrations.AlterField(
model_name='datatablestat',
name='last_curated_on',
field=models.DateTimeField(null=True),
),
]

wbg-optronix-lab/emergence-lab | core/api/user.py | Python | mit | 460 | 0

# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework import generics, permissions
from core.models import User
from core.serializers import UserSerializer
class UserListAPIView(generics.ListAPIView):
"""
Read-only API View to list all users.
"""
queryset = User.objects.all()
    serializer_class = UserSerializer
permission_classes = (permissions.IsAuthenticated,)
paginate_by = 100

yoshrote/Columns | columns/lib/authentication.py | Python | bsd-3-clause | 21,673 | 0.036635

import urllib, cgi
from decorator import decorator
from pylons.controllers.util import redirect
from pylons.controllers.util import Response, Request
from columns.lib import helpers
#from columns.lib import oauthtwitter
import oauthtwitter
from columns.lib import json
from columns.lib.exc import NoResultFound
from openid.consumer import consumer
from openid.extensions import pape, sreg
import logging
log = logging.getLogger(__name__)
__all__ = ['make_oid_store','store_user','retrieve_user','AuthenticationAction','AuthenticationResponse','AuthenticationMiddleware']
'''
from openid.store.interface import OpenIDStore
from openid.association import Association
from openid.store import nonce
from pymongo.errors import OperationFailure
class MongoStore(OpenIDStore):
"""\
This is the interface for the store objects the OpenID library
uses. It is a single class that provides all of the persistence
mechanisms that the OpenID library needs, for both servers and
consumers.
@change: Version 2.0 removed the C{storeNonce}, C{getAuthKey}, and C{isDumb}
methods, and changed the behavior of the C{L{useNonce}} method
to support one-way nonces. It added C{L{cleanupNonces}},
C{L{cleanupAssociations}}, and C{L{cleanup}}.
@sort: storeAssociation, getAssociation, removeAssociation,
useNonce
"""
associations_table = 'associations'
nonces_table = 'nonces'
def __init__(self, conn, associations_table=None, nonces_table=None):
"""
This creates a new MongoStore instance. It requires an
established database connection be given to it, and it allows
overriding the default collection names.
@param conn: This must be an established connection to a
MongoDB database.
        @type conn: A pymongo compatible connection
object.
@param associations_table: This is an optional parameter to
specify the name of the collection used for storing
associations. The default value is specified in
C{L{MongoStore.associations_table}}.
@type associations_table: C{str}
@param nonces_table: This is an optional parameter to specify
the name of the collection used for storing nonces. The
default value is specified in C{L{MongoStore.nonces_table}}.
@type nonces_table: C{str}
"""
self.conn = conn
self._table_names = {
'associations': associations_table or self.associations_table,
'nonces': nonces_table or self.nonces_table,
}
self.max_nonce_age = 6 * 60 * 60 # Six hours, in seconds
def storeAssociation(self, server_url, association):
"""
This method puts a C{L{Association
<openid.association.Association>}} object into storage,
retrievable by server URL and handle.
@param server_url: The URL of the identity server that this
association is with. Because of the way the server
portion of the library uses this interface, don't assume
there are any limitations on the character set of the
input string. In particular, expect to see unescaped
non-url-safe characters in the server_url field.
@type server_url: C{str}
@param association: The C{L{Association
<openid.association.Association>}} to store.
@type association: C{L{Association
<openid.association.Association>}}
@return: C{None}
@rtype: C{NoneType}
"""
a = association
self.conn[self._table_names['associations']].find_and_modify({
'server_url':server_url,
'handle':a.handle,
'issued':a.issued,
'lifetime':a.lifetime,
'assoc_type':a.assoc_type,
'secret':a.secret
}, update=True, upsert=True)
def getAssociation(self, server_url, handle=None):
"""
This method returns an C{L{Association
<openid.association.Association>}} object from storage that
matches the server URL and, if specified, handle. It returns
C{None} if no such association is found or if the matching
association is expired.
If no handle is specified, the store may return any
association which matches the server URL. If multiple
associations are valid, the recommended return value for this
method is the one most recently issued.
This method is allowed (and encouraged) to garbage collect
expired associations when found. This method must not return
expired associations.
@param server_url: The URL of the identity server to get the
association for. Because of the way the server portion of
the library uses this interface, don't assume there are
any limitations on the character set of the input string.
In particular, expect to see unescaped non-url-safe
characters in the server_url field.
@type server_url: C{str}
@param handle: This optional parameter is the handle of the
specific association to get. If no specific handle is
provided, any valid association matching the server URL is
returned.
@type handle: C{str} or C{NoneType}
@return: The C{L{Association
<openid.association.Association>}} for the given identity
server.
@rtype: C{L{Association <openid.association.Association>}} or
C{NoneType}
"""
if handle is not None:
rows = self.conn[self._table_names['associations']].find({'server_url':server_url, 'handle':handle})
else:
rows = self.conn[self._table_names['associations']].find({'server_url':server_url})
if len(rows) == 0:
return None
else:
associations = []
for values in rows:
assoc = Association(**values)
if assoc.getExpiresIn() == 0:
self.removeAssociation(server_url, assoc.handle)
else:
                    associations.append((assoc.issued, assoc))
if associations:
associations.sort()
return associations[-1][1]
else:
return None
def removeAssociation(self, server_url, handle):
"""
This method removes the matching association if it's found,
        and returns whether the association was removed or not.
@param server_url: The URL of the identity server the
association to remove belongs to. Because of the way the
server portion of the library uses this interface, don't
assume there are any limitations on the character set of
the input string. In particular, expect to see unescaped
non-url-safe characters in the server_url field.
@type server_url: C{str}
@param handle: This is the handle of the association to
remove. If there isn't an association found that matches
both the given URL and handle, then there was no matching
handle found.
@type handle: C{str}
@return: Returns whether or not the given association existed.
@rtype: C{bool} or C{int}
"""
tmp = self.conn[self._table_names['associations']].find_and_modify({'server_url':server_url, 'handle':handle},remove=True)
return tmp is not None
def useNonce(self, server_url, timestamp, salt):
"""Called when using a nonce.
This method should return C{True} if the nonce has not been
used before, and store it for a while to make sure nobody
tries to use the same value again. If the nonce has already
been used or the timestamp is not current, return C{False}.
You may use L{openid.store.nonce.SKEW} for your timestamp window.
@change: In earlier versions, round-trip nonces were used and
a nonce was only valid if it had been previously stored
with C{storeNonce}. Version 2.0 uses one-way nonces,
requiring a different implementation here that does not
depend on a C{storeNonce} call. (C{storeNonce} is no
longer part of the interface.)
@param server_url: The URL of the server from which the nonce
originated.
@type server_url: C{str}
@param timestamp: The time that the nonce was created (to the
nearest second), in seconds since January 1 1970 UTC.
@type timestamp: C{int}
@param salt: A random string that makes two nonces from the
same server issued during the same second unique.
@type salt: str
@return: Whether or not the nonce was valid.
@rtype: C{bool}
"""
if abs(timestamp - time.time()) > nonce.SKEW:
return False
try:
self.conn[self._table_names['nonces']].insert({'server_url':server_url, 'timestamp':timestamp, 'salt':salt}, safe=True)
except OperationFailure:
# The key uniqueness check failed
return False
e

srguiwiz/nrvr-commander | src/nrvr/xml/etree.py | Python | bsd-2-clause | 4,914 | 0.004274

#!/usr/bin/python
"""nrvr.xml.etree - Utilities for xml.etree.ElementTree
The main class provided by this module is ElementTreeUtil.
To be expanded as needed.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import copy
import xml.etree.ElementTree
class ElementTreeUtil(object):
"""Utilities for xml.etree.ElementTree.
Written for Python 2.6."""
@classmethod
def indent(cls, element, indent=" ", level=0):
"""Set whitespace for indentation.
element
an xml.etree.ElementTree.Element instance.
indent
the additional indent for each level down.
level
increases on recursive calls.
Need not be set on regular use."""
levelIndent = "\n" + level * indent
if len(element):
# element has child element
if not element.text or not element.text.strip():
# element has no text or text is only whitespace
element.text = levelIndent + indent
for child in element:
# child indented one level more
cls.indent(child, indent=indent, level=level + 1)
if not child.tail or not child.tail.strip():
# last child has no tail or tail is only whitespace
child.tail = levelIndent
if level > 0:
# any level except top level
if not element.tail or not element.tail.strip():
# element has no tail or tail is only whitespace
element.tail = levelIndent
else:
# top level
element.tail = ""
@classmethod
def unindent(cls, element):
"""Remove whitespace from indentation.
element
an xml.etree.ElementTree.Element instance."""
if len(element):
# element has child element
if not element.text or not element.text.strip():
# element has no text or text is only whitespace
element.text = ""
for child in element:
# child indented one level more
cls.unindent(child)
if not element.tail or not element.tail.strip():
# element has no tail or tail is only whitespace
element.tail = ""
@classmethod
def tostring(cls, element, indent=" ", xml_declaration=True, encoding="utf-8"):
"""Generate a string representation.
element
an xml.etree.ElementTree.Element instance.
Tolerates xml.etree.ElementTree.ElementTree.
        indent
            the additional indent for each level down.
            If None then unindented.
xml_declaration
whether with XML declaration <?xml version="1.0" encoding="utf-8"?>."""
# tolerate tree instead of element
if isinstance(element, xml.etree.ElementTree.ElementTree):
# if given a tree
element = element.getroot()
element = copy.deepcopy(element)
if indent is not None:
cls.indent(element, indent)
else:
cls.unindent(element)
string = xml.etree.ElementTree.tostring(element, encoding=encoding)
if xml_declaration:
string = '<?xml version="1.0" encoding="{0}"?>\n'.format(encoding) + string
return string
@classmethod
def simpledict(cls, element):
"""Generate a dictionary from child element tags and text.
element
an xml.etree.ElementTree.Element instance."""
children = element.findall('*')
dictionary = {}
for child in children:
dictionary[child.tag] = child.text
return dictionary
if __name__ == "__main__":
import sys
tree = xml.etree.ElementTree.ElementTree(xml.etree.ElementTree.XML \
("""<e1 a1="A1">
<e2 a2="A2">E2</e2>
<e3 a3="A3">E3</e3>
<e4><e5/></e4>
<e6/></e1>"""))
tree.write(sys.stdout)
print # a newline after the write of unindented XML
ElementTreeUtil.indent(tree.getroot())
tree.write(sys.stdout)
print # a newline after the write of unindented XML
print xml.etree.ElementTree.tostring(tree.getroot())
ElementTreeUtil.unindent(tree.getroot())
tree.write(sys.stdout)
print # a newline after the write of unindented XML
print ElementTreeUtil.tostring(tree)
print ElementTreeUtil.tostring(tree.getroot())
print ElementTreeUtil.tostring(tree, indent=None)

tedlaz/pyted | pykoinoxrista/koinoxrista/fmy2015.py | Python | gpl-3.0 | 3,312 | 0.000367

# -*- coding: utf-8 -*-
import fixed_size_text as ft
nu = ft.Num()
it = ft.Int()
ymd = ft.DatYYYMMDD()
ah1 = [ft.col(8),  # file name
       ft.col(8, 0, ' ', ymd),  # creation date
       ft.col(4),  # run cycle number
       ft.col(127)  # Filler
]
h1 = ft.row(0, ah1)
ah2 = [ft.col(4),  # year
       ft.col(18),  # surname
       ft.col(9),  # first name
       ft.col(3),  # father's name
       ft.col(1),  # 0=company name, 1=personal full name
       ft.col(9),  # AFM (tax ID)
       ft.col(16),  # business activity
       ft.col(10),  # city
       ft.col(16),  # street
       ft.col(5),  # street number
       ft.col(5),  # postal code
       ft.col(51)  # Filler
]
h2 = ft.row(1, ah2)
ah3 = [ft.col(16, 1, '0', nu),  # gross pay
       ft.col(16, 1, '0', nu),  # deductions
       ft.col(16, 1, '0', nu),  # net pay
       ft.col(15, 1, '0', nu),  # tax due
       ft.col(15, 1, '0', nu),  # tax withheld
       ft.col(15, 1, '0', nu),  # special solidarity levy
       ft.col(14, 1, '0', nu),  # stamp duty
       ft.col(13, 1, '0', nu),  # OGA on stamp duty
       ft.col(27)  # Filler
]
h3 = ft.row(2, ah3)
ah4 = [ft.col(9),  # employee AFM (tax ID)
       ft.col(1),  # Filler
       ft.col(18),  # surname
       ft.col(9),  # first name
       ft.col(3),  # spouse's or father's name
       ft.col(11),  # AMKA (social security number)
       ft.col(2, 1, '0', it),  # number of children
       ft.col(2),  # pay type, 01=regular pay
       ft.col(11, 1, '0', nu),  # gross pay
       ft.col(10, 1, '0', nu),  # deductions
       ft.col(11, 1, '0', nu),  # net pay
       ft.col(10, 1, '0', nu),  # tax due
       ft.col(10, 1, '0', nu),  # tax withheld
       ft.col(10, 1, '0', nu),  # special solidarity levy
       ft.col(9, 1, '0', nu),  # stamp duty
       ft.col(8, 1, '0', nu),  # OGA on stamp duty
       ft.col(4, 1, '0', it),  # reference year for retroactive pay
       ft.col(9),  # Filler
]
h4 = ft.row(3, ah4)
r1 = h1.write(['JL10', '2016-01-15', '2015', ''])
a2 = ['2015',
u'ΑΚΤΗ ΦΑΡΑΓΓΑ ΠΑΡΟΥ ΕΠΕ',
'',
'',
0,
'999249820',
u'ΕΣΤΙΑΤΟΡΙΟ ΜΠΑΡ',
u'ΠΑΡΟΣ',
u'ΑΓΑΘΗΜΕΡΟΥ',
'3',
      '84400',
''
]
r2 = h2.write(a2)
a3 = [20220.98, 3575.14, 16645.84, 0, 0, 0, 0, 0, '']
r3 = h3.write(a3)
a4 = ['034140096', '', u'ΛΑΖΑΡΟΣ', u'ΘΕΟΔΩΡΟΣ', u'ΚΩΝΣΤΑΝΤ', '02108001427',
1, '01',
      20220.98, 3575.14, 16645.84, 0, 0, 0, 0, 0, '', ''
]
r4 = h4.write(a4)
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
rtel = r1 + r2 + r3 + r4
print(len(r1), len(r2), len(r3), len(r4))
f = open('tstfile', 'w')
f.write(rtel.encode('CP1253'))
f.close()

arity-r/MiniLogic | lex.py | Python | mit | 1,410 | 0.055319

import ply.lex as lex
from ast import Node
reserved = {
'or' : 'LOR',
'and' : 'LAND',
'neg' : 'NEG',
'exists' : 'EXISTS',
'forall' : 'FORALL',
'implies': 'IMPLIES',
'iff' : 'IFF'
}
tokens = tuple(
[
'WORD',
'VARIABLE',
'CONSTANT',
'FUNCTION',
'PREDICATE',
'COMMA',
'LPAREN',
'RPAREN',
'LBRACKET',
'RBRACKET',
] + list(reserved.values())
)
t_COMMA = r','
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
def t_WORD(t):
r'[a-zA-Z][a-zA-Z]+' # HACK no reserved word has len < 2
type = reserved.get(t.value)
if not type:
        t_error(t)
t.type = type
t.value = Node(type, t.value)
return t
def t_VARIABLE(t):
r'[u-z]'
t.value = Node('VARIABLE', t.value)
return t
def t_CONSTANT(t):
r'[a-e]'
t.value = Node('CONSTANT', t.value)
return t
def t_FUNCTION(t):
r'[f-j]'
t.value = Node('FUNCTION', t.value)
return t
def t_PREDICATE(t):
r'[P-U]'
    t.value = Node('PREDICATE', t.value)
return t
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
t_ignore = ' \t'
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
#lexer = lex.lex(optimize=1, debug=1)
if __name__ == '__main__':
data = '''neg (exists x)(forall y)[P(x,y) iff neg Q(y, x, y)]'''
lexer.input(data)
for tok in lexer:
print(tok)

yaqiyang/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyDuration/autorestdurationtestservice/auto_rest_duration_test_service.py | Python | mit | 2,204 | 0.001361

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.duration_operations import DurationOperations
from . import models
class AutoRestDurationTestServiceConfiguration(Configuration):
"""Configuration for AutoRestDurationTestService
Note that all parameters used to create this instance are saved as instance
attributes.
:param str base_url: Service URL
:param str filepath: Existing config
"""
def __init__(
self, base_url=None, filepath=None):
if not base_url:
base_url = 'https://localhost'
super(AutoRestDurationTestServiceConfiguration, self).__init__(base_url, filepath)
self.add_user_agent('autorestdurationtestservice/{}'.format(VERSION))
class AutoRestDurationTestService(object):
"""Test Infrastructure for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestDurationTestServiceConfiguration
:ivar duration: Duration operations
:vartype duration: .operations.DurationOperations
:param str base_url: Service URL
:param str filepath: Existing config
"""
    def __init__(
self, base_url=None, filepath=None):
self.config = AutoRestDurationTestServiceConfiguration(base_url, filepath)
self._client = ServiceClient(None, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.duration = DurationOperations(
            self._client, self.config, self._serialize, self._deserialize)

Cloudify-PS/cloudify-manager-blueprints | components/nginx/scripts/creation_validation.py | Python | apache-2.0 | 445 | 0

#!/usr/bin/env python
from os.path import join, dirname
from cloudify import ctx
ctx.download_resource(
join('components', 'utils.py'),
join(dirname(__file__), 'utils.py'))
import utils # NOQA
runtime_props = ctx.instance.runtime_properties
if utils.is_upgrade:
SERVICE_NAME = runtime_props['service_name']
utils.validate_upgrade_directories(SERVICE_NAME)
utils.systemd.verify_alive(SERVICE_NAME, append_prefix=False)

iansprice/wagtail | wagtail/contrib/modeladmin/helpers/button.py | Python | bsd-3-clause | 7,291 | 0.000137

from __future__ import absolute_import, unicode_literals
from django.contrib.admin.utils import quote
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
class ButtonHelper(object):
default_button_classnames = ['button']
add_button_classnames = ['bicolor', 'icon', 'icon-plus']
inspect_button_classnames = []
edit_button_classnames = []
delete_button_classnames = ['no']
def __init__(self, view, request):
self.view = view
self.request = request
self.model = view.model
self.opts = view.model._meta
self.verbose_name = force_text(self.opts.verbose_name)
self.verbose_name_plural = force_text(self.opts.verbose_name_plural)
self.permission_helper = view.permission_helper
self.url_helper = view.url_helper
def finalise_classname(self, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
combined = self.default_button_classnames + classnames_add
finalised = [cn for cn in combined if cn not in classnames_exclude]
return ' '.join(finalised)
def add_button(self, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.add_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.create_url,
'label': _('Add %s') % self.verbose_name,
'classname': cn,
'title': _('Add a new %s') % self.verbose_name,
}
def inspect_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.inspect_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('inspect', quote(pk)),
'label': _('Inspect'),
'classname': cn,
'title': _('Inspect this %s') % self.verbose_name,
}
def edit_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.edit_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('edit', quote(pk)),
'label': _('Edit'),
'classname': cn,
'title': _('Edit this %s') % self.verbose_name,
}
def delete_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.delete_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('delete', quote(pk)),
'label': _('Delete'),
'classname': cn,
'title': _('Delete this %s') % self.verbose_name,
}
def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
classnames_exclude=None):
if exclude is None:
exclude = []
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
ph = self.permission_helper
usr = self.request.user
pk = getattr(obj, self.opts.pk.attname)
btns = []
if('inspect' not in exclude and ph.user_can_inspect_obj(usr, obj)):
btns.append(
self.inspect_button(pk, classnames_add, classnames_exclude)
)
if('edit' not in exclude and ph.user_can_edit_obj(usr, obj)):
btns.append(
                self.edit_button(pk, classnames_add, classnames_exclude)
)
if('delete' not in exclude and ph.user_can_delete_obj(usr, obj)):
btns.append(
self.delete_button(pk, classnames_add, classnames_exclude)
)
return btns
class PageButtonHelper(ButtonHelper):
    unpublish_button_classnames = []
copy_button_classnames = []
def unpublish_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.unpublish_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('unpublish', quote(pk)),
'label': _('Unpublish'),
'classname': cn,
'title': _('Unpublish this %s') % self.verbose_name,
}
def copy_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.copy_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('copy', quote(pk)),
'label': _('Copy'),
'classname': cn,
'title': _('Copy this %s') % self.verbose_name,
}
def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
classnames_exclude=None):
if exclude is None:
exclude = []
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
ph = self.permission_helper
usr = self.request.user
pk = getattr(obj, self.opts.pk.attname)
btns = []
if('inspect' not in exclude and ph.user_can_inspect_obj(usr, obj)):
btns.append(
self.inspect_button(pk, classnames_add, classnames_exclude)
)
if('edit' not in exclude and ph.user_can_edit_obj(usr, obj)):
btns.append(
self.edit_button(pk, classnames_add, classnames_exclude)
)
if('copy' not in exclude and ph.user_can_copy_obj(usr, obj)):
btns.append(
self.copy_button(pk, classnames_add, classnames_exclude)
)
if('unpublish' not in exclude and ph.user_can_unpublish_obj(usr, obj)):
btns.append(
self.unpublish_button(pk, classnames_add, classnames_exclude)
)
if('delete' not in exclude and ph.user_can_delete_obj(usr, obj)):
btns.append(
self.delete_button(pk, classnames_add, classnames_exclude)
)
return btns

harterj/moose | python/moosetree/Node.py | Python | lgpl-2.1 | 6,708 | 0.002534

"""
For simplicity this module should be a stand-alone package, i.e., it should not use any
non-standard python packages such as mooseutils.
"""
import copy
from . import search
class Node(object):
"""
Base class for tree nodes that accepts arbitrary attributes.
Create a new node in the tree that is a child of *parent* with the given *name*. The supplied
*parent* must be another `Node` object. All keyword arguments are stored as "attributes" and may
be of any type.
!alert warning title=Speed is Important!
The need for this object comes from the MooseDocs package, which uses tree objects extensively.
Originally, MooseDocs used the anytree package for these structures. As the MooseDocs system
evolved as well as the amount of documentation, in particular the amount of generated HTML
output, the speed in creating the tree nodes became critical. The anytree package is robust and
well designed, but the construction of the nodes was not fast enough.
"""
def __init__(self, parent, name, **kwargs):
"""
This constructor must be as minimal as possible for speed purposes.
IMPORTANT: Do not add more items to this unless you have good reason, it will impact
MooseDocs performance greatly.
"""
self.__children = list()
self.__parent = parent
self.__name = name
self.__attributes = kwargs
if self.__parent is not None:
parent.__children.append(self)
@property
def name(self):
"""Return the name of the Node."""
return self.__name
@property
def parent(self):
"""Return the parent Node object, which is None for a root node."""
return self.__parent
@parent.setter
def parent(self, new_parent):
"""Set the parent Node object to *new_parent*, use None to remove the node from the tree."""
if (self.__parent is not None) and (self in self.__parent.__children):
self.__parent.__children.remove(self)
self.__parent = new_parent
if self.__parent is not None:
self.__parent.__children.append(self)
@property
def children(self):
"""Return a list of children.
!alert note
The list is a copy but the Node objects in the list are not.
"""
return copy.copy(self.__children)
@property
def descendants(self):
"""Return a list of all descendants, children's children etc."""
return search.iterate(self, method=search.IterMethod.PRE_ORDER)
@property
def count(self):
"""Return the number of all descendants"""
count = len(self.__children)
for child in self.__children:
count += child.count
return count
def __iter__(self):
"""Iterate of the children (e.g., `for child in node:`)"""
return iter(self.__children)
    def insert(self, idx, child):
        """Insert a node *child* before the supplied *idx* in the list of children."""
self.__children.insert(idx, child)
child.__parent = self
@property
def path(self):
"""Return the nodes that lead to the root node of the tree from this node."""
nodes = [self]
parent = self.__parent
while parent is not None:
nodes.insert(0, parent)
parent = parent.parent
return nodes
@property
def root(self):
"""Return the root node of the tree."""
return self.path[0]
@property
def is_root(self):
"""Return True if the Node is a root, i.e., is the parent node object set to None."""
return self.__parent is None
@property
def siblings(self):
"""Return a list of sibling nodes."""
if self.__parent is not None:
children = self.__parent.children
children.remove(self)
return children
return []
@property
def previous(self):
"""Return the previous sibling, if it exists."""
if (self.__parent is not None) and (self.__parent.__children):
idx = self.__parent.__children.index(self)
if idx > 0:
return self.__parent.__children[idx-1]
@property
def next(self):
"""Return the next sibling, if it exists."""
if (self.__parent is not None) and (self.__parent.__children):
idx = self.__parent.__children.index(self)
if idx < len(self.__parent.__children) - 1:
return self.__parent.__children[idx+1]
def __call__(self, *args):
"""Return child nodes based on index."""
child = self
for index in args:
child = child.__children[index]
return child
@property
def attributes(self):
"""Return the a 'attributes' (key, value pairs supplied in construction) for this node."""
return self.__attributes
def __getitem__(self, key):
"""Retrieve an attribute using operator[]."""
return self.__attributes[key]
def __setitem__(self, key, value):
"""Set an attribute using operator[]."""
self.__attributes[key] = value
def __contains__(self, key):
"""Test if an attribute exists using the 'in' keyword."""
return key in self.__attributes
def get(self, key, default=None):
"""Return the value of an attribute *key* or *default* if it does not exist."""
return self.__attributes.get(key, default)
def items(self):
"""Return the dict() iterator to the attributes, i.e., `k, v in node.items()`."""
return self.__attributes.items()
def __len__(self):
"""Return the number of children."""
return len(self.__children)
def __bool__(self):
"""If this class exists then it should evaluate to True."""
return True
def __str__(self):
"""Return a unicode string showing the tree structure."""
return self.__print()
def __repr__(self):
"""Return the 'name' of the object as it should be printed in the tree."""
if self.__attributes:
return '{}: {}'.format(self.name, repr(self.__attributes))
return self.name
def __print(self, indent=u''):
"""Helper function printing to the screen."""
if (self.parent is None) or (self.parent.children[-1] is self):
out = u'{}\u2514\u2500 {}\n'.format(indent, repr(self))
indent += u" "
else:
out = u'{}\u251c\u2500 {}\n'.format(indent, repr(self))
indent += u"\u2502 "
for child in self.children:
out += child.__print(indent)
return out
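A brief usage sketch of the `Node` class above, assuming the module is importable as `moosetree`; the node names and attributes are illustrative only:

```python
# Minimal usage sketch; node names and attributes are illustrative.
from moosetree import Node  # assumes the package above is importable

root = Node(None, 'root', year=2020)
alpha = Node(root, 'alpha')
beta = Node(root, 'beta', color='blue')
Node(alpha, 'leaf')

print(root.count)      # 3: two children plus one grandchild
print(beta['color'])   # attribute access via operator[]
print(root)            # unicode rendering of the tree structure
```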

cloudify-cosmo/cloudify-manager | tests/integration_tests/resources/dsl/scripts/workflows/test_deployment_id_parameters.py | Python | apache-2.0 | 664 | 0

import sys
import json
def test_parameter(name, value):
assert value is not None
print("Tested parameter '{0}' is {1}".format(name, value))
if __name__ == '__main__':
with open("{0}/input.json".format(sys.argv[1]), 'r') as fh:
data = json.load(fh)
parameters = data.get('kwargs', {})
expected_parameters = parameters.pop('to_be_tested')
for k, v in parameters.items():
if k in expected_parameters:
test_parameter(k, v)
expected_parameters.remove(k)
if expected_parameters:
raise Exception("These parameters were not
|
tested: {0}"
.format(expected_parameters))

libvirt/autotest | client/bin/package_unittest.py | Python | gpl-2.0 | 5,565 | 0.001617

#!/usr/bin/python
import unittest, os
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.client.bin import package, os_dep, utils
class TestPackage(unittest.TestCase):
def setUp(self):
self.god = mock.mock_god()
self.god.stub_function(os_dep, "command")
def tearDown(self):
self.god.unstub_all()
def info_common_setup(self, input_package, result):
self.god.stub_function(os.path, "isfile")
self.god.stub_function(utils, "system_output")
self.god.stub_function(utils, "system")
# record
os.path.isfile.expect_call(input_package).and_return(True)
utils.system_output.expect_call(
'file ' + input_package).and_return(result)
utils.system_output.expect_call(
'file ' + input_package).and_return(result)
def test_info_rpm(self):
# setup
input_package = "package.rpm"
file_result = "rpm"
ver = '1.0'
# common setup
self.info_common_setup(input_package, file_result)
# record
package_info = {}
package_info['type'] = 'rpm'
os_dep.command.expect_call('rpm')
s_cmd = 'rpm -qp --qf %{SOURCE} ' + input_package + ' 2>/dev/null'
a_cmd = 'rpm -qp --qf %{ARCH} ' + input_package + ' 2>/dev/null'
v_cmd = 'rpm -qp ' + input_package + ' 2>/dev/null'
utils.system_output.expect_call(v_cmd).and_return(ver)
i_cmd = 'rpm -q ' + ver + ' 2>&1 >/dev/null'
package_info['system_support'] = True
utils.system_output.expect_call(s_cmd).and_return('source')
package_info['source'] = True
utils.system_output.expect_call(v_cmd).and_return(ver)
package_info['version'] = ver
utils.system_output.expect_call(a_cmd).and_return('586')
package_info['arch'] = '586'
utils.system.expect_call(i_cmd)
package_info['installed'] = True
# run and check
info = package.info(input_package)
self.god.check_playback()
self.assertEquals(info, package_info)
def test_info_dpkg(self):
# setup
input_package = "package.deb"
file_result = "debian"
ver = '1.0'
# common setup
self.info_common_setup(input_package, file_result)
# record
package_info = {}
package_info['type'] = 'dpkg'
package_info['source'] = False
os_dep.command.expect_call('dpkg')
a_cmd = 'dpkg -f ' + input_package + ' Architecture 2>/dev/null'
v_cmd = 'dpkg -f ' + input_package + ' Package 2>/dev/null'
utils.system_output.expect_call(v_cmd).and_return(ver)
i_cmd = 'dpkg -s ' + ver + ' 2>/dev/null'
package_info['system_support'] = True
utils.system_output.expect_call(v_cmd).and_return(ver)
package_info['version'] = ver
utils.system_output.expect_call(a_cmd).and_return('586')
package_info['arch'] = '586'
utils.system_output.expect_call(i_cmd,
ignore_status=True).and_return('installed')
package_info['installed'] = True
# run and check
info = package.info(input_package)
self.god.check_playback()
        self.assertEquals(info, package_info)
def test_install(self):
# setup
input_package = "package.rpm"
self.god.stub_function(package, "info")
self.god.stub_function(utils, "system")
# record
package_info = {}
package_info['type'] = 'rpm'
package_info['system_support'] = True
package_info['source'] = True
package_info['installed'] = True
        package.info.expect_call(input_package).and_return(package_info)
install_command = 'rpm %s -U %s' % ('', input_package)
utils.system.expect_call(install_command)
# run and test
package.install(input_package)
self.god.check_playback()
def test_convert(self):
os_dep.command.expect_call('alien')
dest_format = 'dpkg'
input_package = "package.rpm"
output = "package_output.deb"
# record
self.god.stub_function(utils, "system_output")
utils.system_output.expect_call(
'alien --to-deb %s 2>/dev/null' % input_package).and_return(output)
# run test
package.convert(input_package, dest_format)
self.god.check_playback()
def test_os_support_full(self):
# recording
exp_support = {}
for package_manager in package.KNOWN_PACKAGE_MANAGERS:
os_dep.command.expect_call(package_manager)
exp_support[package_manager] = True
os_dep.command.expect_call('alien')
exp_support['conversion'] = True
# run and test
support = package.os_support()
self.god.check_playback()
self.assertEquals(support, exp_support)
def test_os_support_none(self):
# recording
exp_support = {}
for package_manager in package.KNOWN_PACKAGE_MANAGERS:
os_dep.command.expect_call(package_manager).and_raises(ValueError)
exp_support[package_manager] = False
os_dep.command.expect_call('alien').and_raises(ValueError)
exp_support['conversion'] = False
# run and test
support = package.os_support()
self.god.check_playback()
self.assertEquals(support, exp_support)
if __name__ == "__main__":
unittest.main()

unkyulee/elastic-cms | src/task/modules/CMD.py | Python | mit | 530 | 0.00566

from subprocess import PIPE, Popen
from sqlalchemy import create_engine
def run(p):
try:
p["log"].info(p["action"]['query'])
proc = Popen(p["action"]['query'], shell=True,
                     stdin=PIPE, stdout=PIPE, stderr=PIPE)
result = proc.communicate()
message = ''
        for r in result:
if r: message += r + '\n'
p["log"].success(message)
except Exception, e:
AllGood = False
p["log"].error("command line execution failed",e)
return True

benglard/Rhetorical-Analysis | ParallelismFinder.py | Python | mit | 2,891 | 0.012452

from nltk import *
from nltk.corpus import brown
class ParallelismFinder:
def __init__(self):
self.f = ""
self.counter = 0
self.para = [] #array to hold instances of parallelism
self.tokenizer = RegexpTokenizer('\w+') #remove punctuation which could mess up finding parallelism
#Train tagger with subset of Brown News Corpus
brown_news_tagged = brown.tagged_sents(categories='news')
brown_train = brown_news_tagged
self.tagger = UnigramTagger(brown_train) #Unigram Tagger based on Brown corpus
#Path is inputted from AIPController
#Returns parallelism counter
def sendFile(self, path):
self.f = open(path)
for line in self.f:
try:
self.get_all_parallelism(line)
except:
continue
c = self.counter
self.counter = 0
self.para = [] #re-initialize to empty array
return c
#Returns the parallelism counter
def get_all_parallelism(self, line):
sent = self.tokenizer.tokenize(line)
tags = self.tagger.tag(sent)
self.get_phrase_parallelism(tags, 1)
self.get_phrase_parallelism(tags, 2) #Pairs of words
self.get_phrase_parallelism(tags, 3) #Triplets of words
        self.get_phrase_parallelism(tags, 4) #Group of 4 words
#Get parallelism between n_para # of words
#Ex: the a
#Ex: the bird, the word
#Ex2: I came, I saw, I conquered
#Ex: Of the people, by the people, for the people
#Ex: the people are good, the people are bad
def get_phrase_parallelism(self, tags, n_para):
tagged1, tagged2 = [], []
words1, words2 = [], []
for n in range(0, len(tags)-n_para, n_para):
try:
tag_subset = tags[n:n+n_para]
tag = self.get_tags(tag_subset)
tagged1.append([tag])
tag_subset = tags[n+n_para:n+(2*n_para)]
tag = self.get_tags(tag_subset)
tagged2.append([tag])
word_subset = tags[n:n+n_para]
words1 = self.get_words(word_subset)
word_subset = tags[n+n_para:n+(2*n_para)]
words2 = self.get_words(word_subset)
if tagged1 == tagged2:
self.para.append([words1, words2])
self.counter += 1
tagged1, tagged2 = [], []
words1, words2 = [], []
except:
continue
#Get tags of phrases for comparison
def get_tags(self, tag_sub):
ret = []
for t in tag_sub:
ret.append(t[1])
return ret
#Get words of phrases for entrance into instance array
def get_words(self, word_sub):
ret = []
for t in word_sub:
ret.append(t[0])
return ret
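The core of the detector above is comparing the POS-tag sequences of adjacent word n-grams: two neighbouring groups with identical tag sequences count as parallel. A standalone sketch of that idea, using NLTK's default `pos_tag` rather than the Brown-trained `UnigramTagger` above (and requiring the usual NLTK data downloads):

```python
# Standalone sketch of the same idea: adjacent n-grams whose POS-tag
# sequences match are flagged as parallel. Uses nltk.pos_tag instead of
# the Brown-trained UnigramTagger above; needs the standard nltk data.
import nltk

words = [w for w in nltk.word_tokenize("I came, I saw, I conquered")
         if w.isalnum()]                    # drop punctuation, as above
tags = [tag for _, tag in nltk.pos_tag(words)]

n = 2  # compare adjacent pairs of words
for i in range(0, len(words) - 2 * n + 1, n):
    if tags[i:i + n] == tags[i + n:i + 2 * n]:
        print(words[i:i + n], '<->', words[i + n:i + 2 * n])
```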

martwo/ndhist | test/ndhist/log10_axis_test.py | Python | bsd-2-clause | 788 | 0.005076

import unittest
import numpy as np
import ndhist
class Test(unittest.TestCase):
def test_log10_axis_1D(self):
"""Tests if the log10_axis works with the ndhist object for 1D
histograms.
"""
axis_0 = ndhist.axes.log10(0.1, 100, 0.1)
self.assertTrue(axis_0.nbins == 32)
h = ndhist.ndhist((axis_0,))
        self.assertTrue(np.any(h.binentries) == False)
self.assertTrue(np.any(h.bincontent) == False)
h.fill([0.1, 0.2, 99.])
self.assertTrue(np.all(h.bincontent == np.array([
1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 1.])))
if(__name__ == "__main__"):
unittest.main()

FlintHill/SUAS-Competition | UpdatedImageProcessing/UpdatedImageProcessing/TargetDetection/__init__.py | Python | mit | 668 | 0.002994

from .settings import Settings
from .logger import Logger
from .detection_result_recorder import DetectionResultRecorder
from .color_operations import ColorOperations
from .target_analyzer import TargetAnalyzer
from .background_color_nullifier import BackgroundColorNullifier
from .target_detectors import TargetDetectors
from .false_positive_eliminators import FalsePositiveEliminators
from .integrated_target_detection_process import IntegratedTargetDetectionProcess
from .integrated_target_capturing_process import IntegratedTargetCapturingProcess
from .single_target_map_detector import SingleTargetMapDetector
from .mass_target_detector import MassTargetDetector

justinfx/AtomSplitter | ui/__init__.py | Python | gpl-3.0 | 776 | 0.005155

"""
Copyright (c) 2010 cmiVFX.com <info@cmivfx.com>
This file is part of AtomSplitter.
AtomSplitter is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
AtomSplitter is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with AtomSplitter. If not, see <http://www.gnu.org/licenses/>.
Written by: Justin Israel
justinisrael@gmail.com
justinfx.com
"""

KrisCheng/ML-Learning | archive/Model/sequence/time_series/transform.py | Python | mit | 938 | 0.00533

#!/usr/bin/python
# -*- coding: utf-8 -*-
# how to transform with pandas
from pandas import Series
from pandas import DataFrame
from scipy.stats import boxcox
from matplotlib import pyplot
series = Series.from_csv( "airline-passengers.csv" , header=0)
dataframe = DataFrame(series.values)
dataframe.columns = ['passengers']
# print(dataframe['passengers'])
dataframe['passengers'], lam = boxcox(dataframe['passengers'])
print( "Lambda: %f" % lam)
pyplot.figure(1)
# line plot
pyplot.subplot(211)
pyplot.plot(dataframe['passengers'])
# histogram
pyplot.subplot(212)
pyplot.hist(dataframe['passengers'])
pyplot.show()
# dataframe = DataFrame(series.values)
# dataframe.columns = ["passengers"]
# dataframe["passengers"] = sqrt(dataframe["passengers"])
# pyplot.figure(1)
# # line plot
# pyplot.subplot(211)
# pyplot.plot(dataframe["passengers"])
# # histogram
# pyplot.subplot(212)
# pyplot.hist(dataframe["passengers"])
pyplot.show()

ESS-LLP/frappe | frappe/desk/form/save.py | Python | mit | 1,963 | 0.030056

# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.desk.form.load import run_onload
@frappe.whitelist()
def savedocs(doc, action):
"""save / submit / update doclist"""
try:
doc = frappe.get_doc(json.loads(doc))
set_local_name(doc)
# action
doc.docstatus = {"Save":0, "Submit": 1, "Update": 1, "Cancel": 2}[action]
if doc.docstatus==1:
doc.submit()
else:
try:
doc.save()
except frappe.NameError as e:
doctype, name, original_exception = e if isinstance(e, tuple) else (doc.doctype or "", doc.name or "", None)
frappe.msgprint(frappe._("{0} {1} already exists").format(doctype, name))
raise
# update recent documents
run_onload(doc)
frappe.get_user().update_recent(doc.doctype, doc.name)
send_updated_docs(doc)
except Exception:
if not frappe.local.message_log:
frappe.msgprint(frappe._('Did not save'))
frappe.errprint(frappe.utils.get_traceback())
raise
@frappe.whitelist()
def cancel(doctype=None, name=None, workflow_state_fieldname=None, workflow_state=None):
"""cancel a doclist"""
try:
doc = frappe.get_doc(doctype, name)
if workflow_state_fieldname and workflow_state:
            doc.set(workflow_state_fieldname, workflow_state)
doc.cancel()
send_updated_docs(doc)
    except Exception:
frappe.errprint(frappe.utils.get_traceback())
frappe.msgprint(frappe._("Did not cancel"))
raise
def send_updated_docs(doc):
from .load import get_docinfo
get_docinfo(doc)
d = doc.as_dict()
if hasattr(doc, 'localname'):
d["localname"] = doc.localname
frappe.response.docs.append(d)
def set_local_name(doc):
def _set_local_name(d):
if doc.get('__islocal') or d.get('__islocal'):
d.localname = d.name
d.name = None
_set_local_name(doc)
for child in doc.get_all_children():
_set_local_name(child)
if doc.get("__newname"):
doc.name = doc.get("__newname")

hozblok/biodraw | draw/migrations/0007_auto_20160318_2122.py | Python | gpl-3.0 | 475 | 0

# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-18 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('draw', '0006_auto_20160314_1817'),
    ]
operations = [
migrations.AlterField(
model_name='physicalentity',
name='display_name',
field=models.CharField(blank=True, max_length=1000),
),
]

rebost/pybsd | src/pybsd/commands/ezjail_admin.py | Python | bsd-3-clause | 5,154 | 0.000582

# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import lazy
from ..exceptions import InvalidOutputError, SubprocessError, WhitespaceError
from .base import BaseCommand
__logger__ = logging.getLogger('pybsd')
class EzjailAdmin(BaseCommand):
"""Provides an interface to the ezjail-admin command"""
name = 'ezjail-admin'
@property
def binary(self):
return self.env.ezjail_admin_binary
def check_kwargs(self, subcommand, **kwargs):
# make sure there is no whitespace in the arguments
for key, value in kwargs.items():
if value is None:
continue
if subcommand == 'console' and key == 'cmd':
continue
if len(value.split()) != 1:
raise WhitespaceError(self, self.env, key, value, subcommand)
@lazy.lazy
def list_headers(self):
"""
rc: command return code
out: command stdout
err: command stderr
"""
rc, out, err = self.invoke('list')
if rc:
raise SubprocessError(self, self.env, err.strip(), 'list_headers')
lines = out.splitlines()
if len(lines) < 2:
raise InvalidOutputError(self, self.env, u'output too short', 'list')
headers = []
current = ''
for pos, char in enumerate(lines[1]):
if char != '-' or pos >= len(lines[0]):
headers.append(current.strip())
                if pos >= len(lines[0]):
break
current = ''
else:
current = current + lines[0][pos]
if headers != ['STA', 'JID', 'IP', 'Hostname', 'Root Directory']:
            raise InvalidOutputError(self, self.env, u"output has unknown headers\n['{}']".format(u"', '".join(headers)), 'list')
return ('status', 'jid', 'ip', 'name', 'root')
def list(self):
headers = self.list_headers
rc, out, err = self.invoke('list')
if rc:
raise SubprocessError(self, self.env, err.strip(), 'list')
lines = out.splitlines()
jails = {}
current_jail = None
for line in lines[2:]:
if line[0:4] != ' ':
line = line.strip()
if not line:
continue
entry = dict(zip(headers, line.split()))
entry['ips'] = [entry['ip']]
current_jail = jails[entry.pop('name')] = entry
else:
line = line.strip()
if not line:
continue
if_ip = line.split()[1]
ip = if_ip.split('|')[1]
current_jail['ips'].append(ip)
return jails
def console(self, cmd, jail_name):
self.check_kwargs('console', cmd=cmd, jail_name=jail_name)
rc, out, err = self.invoke('console',
'-e',
cmd,
jail_name)
return out
# subcommands to be implemented:
# def __ezjail_admin(self, subcommand, **kwargs):
# # make sure there is no whitespace in the arguments
# for key, value in kwargs.items():
# if value is None:
# continue
# if subcommand == 'console' and key == 'cmd':
# continue
# if len(value.split()) != 1:
# __logger__.error('The value `%s` of kwarg `%s` contains whitespace', value, key)
# sys.exit(1)
# if subcommand == 'console':
# return self._ezjail_admin(
# 'console',
# '-e',
# kwargs['cmd'],
# kwargs['name'])
# elif subcommand == 'create':
# args = [
# 'create',
# '-c', 'zfs']
# flavour = kwargs.get('flavour')
# if flavour is not None:
# args.extend(['-f', flavour])
# args.extend([
# kwargs['name'],
# kwargs['ip']])
# rc, out, err = self._ezjail_admin(*args)
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'create')
# elif subcommand == 'delete':
# rc, out, err = self._ezjail_admin(
# 'delete',
# '-fw',
# kwargs['name'])
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'delete')
# elif subcommand == 'start':
# rc, out, err = self._ezjail_admin(
# 'start',
# kwargs['name'])
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'start')
# elif subcommand == 'stop':
# rc, out, err = self._ezjail_admin(
# 'stop',
# kwargs['name'])
# if rc:
# raise SubprocessError(self, self.env, err.strip(), 'stop')
# else:
# raise ValueError('Unknown subcommand `%s`' % subcommand)

codeback/openerp-cbk_company_web_discount | res_company.py | Python | agpl-3.0 | 1,385 | 0.003615

# -*- encoding: utf-8 -*-
##############################################################################
#
# res_partner
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from datetime import datetime, timedelta
from openerp.tools.translate import _
class res_company(osv.osv):
"""añadimos los nuevos campos"""
_name = "res.company"
_inherit = "res.company"
_columns = {
'web_discount': fields.float('Descuento web (%)'),
}

dana-i2cat/felix | optin_manager/src/python/openflow/optin_manager/sfa/rspecs/elements/pltag.py | Python | apache-2.0 | 161 | 0.012422

from openflow.optin_manager.sfa.rspecs.elements.element import Element
class PLTag(Element):
fields = [
'tagname',
'value',
]
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/igp_shortcuts/afi/__init__.py
|
Python
|
apache-2.0
| 25,266
| 0.001583
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class afi(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/igp-shortcuts/afi. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Address-family list.
"""
__slots__ = ("_path_helper", "_extmethods", "__afi_name", "__config", "__state")
_yang_name = "afi"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__afi_name = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
        self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
            register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"igp-shortcuts",
"afi",
]
def _get_afi_name(self):
"""
Getter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/afi_name (leafref)
YANG Description: Reference to address-family type.
"""
return self.__afi_name
def _set_afi_name(self, v, load=False):
"""
Setter method for afi_name, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/afi_name (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_afi_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_afi_name() directly.
YANG Description: Reference to address-family type.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError(
"Cannot set keys directly when" + " within an instantiated list"
)
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """afi_name must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="afi-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__afi_name = t
if hasattr(self, "_set"):
self._set()
def _unset_afi_name(self):
self.__afi_name = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="afi-name",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
is_keyval=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/config (container)
YANG Description: This container defines ISIS Shortcuts configuration parameters
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/igp_shortcuts/afi/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS Shortcuts configuration parameters
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
|
gotostack/iSwift
|
iswift/privatefiles/models.py
|
Python
|
apache-2.0
| 1,365
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.db import models
class users_folder_tree(models.Model):
name = models.CharField(max_length=256)
type = models.IntegerField(max_length=128)
parentID = models.IntegerField(max_length=128)
isFile = models.BooleanField()
sizebyte = models.IntegerField(max_length=128)
level = models.IntegerField(max_length=128)
companyid = models.IntegerField(max_length=128)
user_id = models.IntegerField(max_length=128)
isContainer = models.BooleanField()
competence = models.IntegerField(max_length=128)
MD5string = models.CharField(max_length=256)
SHA1string = models.CharField(max_length=256)
CRC32string = models.CharField(max_length=256)
FileLink = models.FloatField(max_length=128)
isDeleted = models.IntegerField(max_length=128)
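    # Django ignores max_length on IntegerField and FloatField, so the
    # max_length arguments above have no effect on the generated schema.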
|
HPPTECH/hpp_IOSTressTest
|
Refer/IOST_OLD_SRC/IOST_0.20/Libs/IOST_WMain/IOST_WMain_SATA.py
|
Python
|
mit
| 7,573
| 0.007659
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WMain/IOST_WMainSATA.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Basic import *
from IOST_Config import *
from IOST_Testcase import *
import gtk
import gtk.glade
import gobject
#======================================================================
try:
IOST_DBG_EN
if IOST_DBG_EN:
IOST_WMainSATA_DebugEnable =1
else:
IOST_WMainSATA_DebugEnable =0
except:
IOST_DBG_EN = False
IOST_WMainSATA_DebugEnable =0
#======================================================================
class IOST_WMain_SATA():
"""
"""
#----------------------------------------------------------------------
def __init__(self, glade_filename, window_name, builder=None):
"""
"""
self.IOST_WMainSATA_WindowName = window_name
if not builder:
self.IOST_WMainSATA_Builder = gtk.Builder()
self.IOST_WMainSATA_Builder.add_from_file(glade_filename)
self.IOST_WMainSATA_Builder.connect_signals(self)
else:
self.IOST_WMainSATA_Builder = builder
#----------------------------------------------------------------------
def SetValueToSATA_Obj(self, window_name):
"""
Init all SATA objects when start IOST Wmain program
"""
if self.IOST_Data["SATA"] == "Enable":
self.IOST_Objs[window_name]["_IP_Enable_SATA_CB"].set_active(True)
for i in range(0, self.IOST_Data["SATA_PortNum"]):
if self.IOST_Data["SATA"+str(i)][0] == "Disable":
self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_CB"].set_active(False)
self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_B"].set_sensitive(False)
else:
self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_CB"].set_active(True)
self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_B"].set_sensitive(True)
else:
self.IOST_Objs[window_name]["_IP_Enable_SATA_CB"].set_active(False)
for i in range(0, self.IOST_Data["SATA_PortNum"]):
self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_CB"].set_sensitive(False)
self.IOST_Objs[window_name]["_Config_SATA"+str(i)+"_B"].set_sensitive(False)
for i in range(0, self.IOST_Data["SATA_PortNum"]):
self.IOST_Data["SATA"+str(i)+"_TestCaseNum"] = len(self.IOST_Data["SATA"+str(i)]) - 1
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA0_B_clicked(self, object, data=None):
"Control to ConfigSATA-0 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA0")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA0_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA0_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA0_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA0"][0] = 'Enable'
else:
self.IOST_Data["SATA0"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA0"][0], "IOST_Data->SATA0_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA1_B_clicked(self, object, data=None):
"Control to ConfigSATA-1 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA1")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA1_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA1_CB"].get_active()
        self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA1_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA1"][0] = 'Enable'
else:
self.IOST_Data["SATA1"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA1"][0], "IOST_Data->SATA1_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA2_B_clicked(self, object, data=None):
"Control to ConfigSATA-2 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA2")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA2_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA2_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA2_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA2"][0] = 'Enable'
else:
self.IOST_Data["SATA2"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA2"][0], "IOST_Data->SATA2_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA3_B_clicked(self, object, data=None):
"Control to ConfigSATA-3 button "
self.WSetupTestcase_SetupTestcase("IOST_WSetupTestcase", "_Skylark", "SATA3")
#----------------------------------------------------------------------
def on_IOST_WMain_Config_SATA3_C_toggled(self, object, data=None):
""
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA3_CB"].get_active()
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA3_B"].set_sensitive(Res)
if (Res):
self.IOST_Data["SATA3"][0] = 'Enable'
else:
self.IOST_Data["SATA3"][0] = 'Disable'
if IOST_WMainSATA_DebugEnable:
iost_print(IOST_DBG_L06, self.IOST_Data["SATA3"][0], "IOST_Data->SATA3_Enable")
#----------------------------------------------------------------------
def on_IOST_WMain_IP_Enable_SATA_CB_toggled(self, object, data=None):
Res = self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_IP_Enable_SATA_CB"].get_active()
self.IOST_WMain_SATA_set_sensitive_all(Res)
if Res:
self.IOST_Data["SATA"] = 'Enable'
else:
self.IOST_Data["SATA"] = 'Disable'
#----------------------------------------------------------------------
def IOST_WMain_SATA_set_sensitive_all(self, value):
for i in range(0, self.IOST_Data["SATA_PortNum"]):
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA"+str(i)+"_CB"].set_sensitive(value)
if self.IOST_Data["SATA"+str(i)][0] == "Enable" and value:
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA"+str(i)+"_B"].set_sensitive(value)
else:
self.IOST_Objs[self.IOST_WMainSATA_WindowName]["_Config_SATA"+str(i)+"_B"].set_sensitive(False)
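    #----------------------------------------------------------------------
    # Consolidation sketch (hypothetical helper, not wired to any glade
    # signal): the four per-port *_C_toggled handlers above differ only in
    # the port index, so one parameterized method could back them all.
    def IOST_WMain_SATA_port_toggled(self, port):
        objs = self.IOST_Objs[self.IOST_WMainSATA_WindowName]
        Res = objs["_Config_SATA" + str(port) + "_CB"].get_active()
        objs["_Config_SATA" + str(port) + "_B"].set_sensitive(Res)
        self.IOST_Data["SATA" + str(port)][0] = 'Enable' if Res else 'Disable'
        if IOST_WMainSATA_DebugEnable:
            iost_print(IOST_DBG_L06, self.IOST_Data["SATA" + str(port)][0],
                       "IOST_Data->SATA" + str(port) + "_Enable")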
|
yrahul3910/chatting-style-recognizer
|
classify.py
|
Python
|
mit
| 2,888
| 0.009349
|
import pandas as pd
import numpy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
content = []
data = []
common = []
def remove_non_ascii(text):
return ''.join([i if ord(i) < 128 else ' ' for i in text])
def remove_words(array):
final = []
words = []
for line in array:
words = line.split()
for x in common:
if x in words:
words.remove(x)
# unneeded words removed, join them
new_line = ' '.join(words)
final.append(new_line)
return final
# 1.txt is where the first person's chat history is put
# and used to train the algorithm
with open("1.txt", encoding="utf8") as f:
content = f.readlines()
# common is a list of 100 most common English words
with open("common") as f:
common = f.readlines()
# Remove non-ASCII and common words from input
content = [w.replace("\n", '') for w in content]
content = [remove_non_ascii(w) for w in content]
content = remove_words(content)
for i in content:
data.append([i, "Person1"]) # First create 2D arrays
# Same thing with second person
with open("2.txt", encoding="utf8") as f:
content = f.readlines()
content = [w.replace("\n", '') for w in content]
content = [remove_non_ascii(w) for w in content]
content = remove_words(content)
for i in content:
data.append([i, "Person2"])
# Third person
with open("3.txt", encoding="utf8") as f:
content = f.readlines()
content = [w.replace("\n", '') for w in content]
content = [remove_non_ascii(w) for w in content]
content = remove_words(content)
for i in content:
data.append([i, "Person3"])
# You could add more people here
data = [[remove_non_ascii(item) for item in row] for row in data]
# We have data in the 2D array. Now we gotta convert to numpy array
data_frame = pd.DataFrame(data, columns=list('xy'))
# Shuffle data for randomness
data_frame = data_frame.reindex(numpy.random.permutation(data_frame.index))
# Create feautre vectors
count_vectorizer = CountVectorizer()
counts = count_vectorizer.fit_transform(data_frame['x'].values)
# Create a Multinomial Naive Bayes classifier with Laplace smoothing
# alpha parameter is 1 by default so it uses Laplace smoothing
classifier = MultinomialNB()
targets = data_frame['y'].values
classifier.fit(counts, targets)
success = 0
fail = 0
sample = [] # Put the test data in sample array
# Below file contains test data for first person
# You can substitute test data for any person
with open("test_P1.txt", encoding="utf8") as f:
sample = f.readlines()
sample = [w.replace("\n", '') for w in sample]
sample = [remove_non_ascii(w) for w in sample]
sample = remove_words(sample)
sample_count = count_vectorizer.transform(sample)
predictions = classifier.predict(sample_count)
for i in predictions:
if i == "Person1":
success += 1
else:
fail += 1
print("Success="+str(success)+"\nFail="+str(fail))
print("Success%="+str(success*100/(fail+success))[0:5])
|
vivisect/synapse
|
synapse/tests/test_lib_datfile.py
|
Python
|
apache-2.0
| 360
| 0.002778
|
import os
import unittest
import synapse
import synapse.lib.datfile as s_datfile
from synapse.tests.common import *
syndir = os.path.dirname(synapse.__file__)
class DatFileTest(SynTest):
def test_datfile_basic(self):
        with s_datfile.openDatFile('synapse.tests/test.dat') as fd:
self.nn(fd)
self.eq(fd.read(), b'woot\n')
|
dsm054/pandas
|
pandas/tests/series/methods/test_explode.py
|
Python
|
bsd-3-clause
| 4,090
| 0.001467
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
def test_basic():
s = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo")
result = s.explode()
expected = pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo"
)
tm.assert_series_equal(result, expected)
def test_mixed_type():
s = pd.Series(
[[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo"
)
result = s.explode()
expected = pd.Series(
[0, 1, 2, np.nan, None, np.nan, "a", "b"],
index=[0, 0, 0, 1, 2, 3, 4, 4],
dtype=object,
name="foo",
)
tm.assert_series_equal(result, expected)
def test_empty():
s = pd.Series(dtype=object)
result = s.explode()
expected = s.copy()
tm.assert_series_equal(result, expected)
def test_nested_lists():
s = pd.Series([[[1, 2, 3]], [1, 2], 1])
result = s.explode()
expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2])
tm.assert_series_equal(result, expected)
def test_multi_index():
    s = pd.Series(
        [[0, 1, 2], np.nan, [], (3, 4)],
name="foo",
index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]),
)
result = s.explode()
index = pd.MultiIndex.from_tuples(
[("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)],
names=["foo", "bar"],
)
expected = pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=index, dtype=object, name="foo"
)
tm.assert_series_equal(result, expected)
def test_large():
s = pd.Series([range(256)]).explode()
result = s.explode()
tm.assert_series_equal(result, s)
def test_invert_array():
df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")})
listify = df.apply(lambda x: x.array, axis=1)
result = listify.explode()
tm.assert_series_equal(result, df["a"].rename())
@pytest.mark.parametrize(
"s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))]
)
def test_non_object_dtype(s):
result = s.explode()
tm.assert_series_equal(result, s)
def test_typical_usecase():
df = pd.DataFrame(
[{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}],
columns=["var1", "var2"],
)
exploded = df.var1.str.split(",").explode()
result = df[["var2"]].join(exploded)
expected = pd.DataFrame(
{"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
columns=["var2", "var1"],
index=[0, 0, 0, 1, 1, 1],
)
tm.assert_frame_equal(result, expected)
def test_nested_EA():
# a nested EA array
s = pd.Series(
[
pd.date_range("20170101", periods=3, tz="UTC"),
pd.date_range("20170104", periods=3, tz="UTC"),
]
)
result = s.explode()
expected = pd.Series(
pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
)
tm.assert_series_equal(result, expected)
def test_duplicate_index():
# GH 28005
s = pd.Series([[1, 2], [3, 4]], index=[0, 0])
result = s.explode()
expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
tm.assert_series_equal(result, expected)
def test_ignore_index():
# GH 34932
s = pd.Series([[1, 2], [3, 4]])
result = s.explode(ignore_index=True)
expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
tm.assert_series_equal(result, expected)
def test_explode_sets():
# https://github.com/pandas-dev/pandas/issues/35614
s = pd.Series([{"a", "b", "c"}], index=[1])
result = s.explode().sort_values()
expected = pd.Series(["a", "b", "c"], index=[1, 1, 1])
tm.assert_series_equal(result, expected)
def test_explode_scalars_can_ignore_index():
# https://github.com/pandas-dev/pandas/issues/40487
s = pd.Series([1, 2, 3], index=["a", "b", "c"])
result = s.explode(ignore_index=True)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(result, expected)
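# Quick reference for the semantics exercised above: list-likes are expanded
# row-wise, empty list-likes become NaN, the original index is repeated, and
# the result dtype is object. For example:
# >>> pd.Series([[1, 2], [], [3]]).explode()
# 0      1
# 0      2
# 1    NaN
# 2      3
# dtype: object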
|
twilio/twilio-python
|
twilio/rest/taskrouter/v1/workspace/task/reservation.py
|
Python
|
mit
| 38,257
| 0.0046
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ReservationList(ListResource):
def __init__(self, version, workspace_sid, task_sid):
"""
Initialize the ReservationList
:param Version version: Version that contains the resource
:param workspace_sid: The SID of the Workspace that this task is contained within.
:param task_sid: The SID of the reserved Task resource
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
"""
super(ReservationList, self).__init__(version)
# Path Solution
self._solution = {'workspace_sid': workspace_sid, 'task_sid': task_sid, }
self._uri = '/Workspaces/{workspace_sid}/Tasks/{task_sid}/Reservations'.format(**self._solution)
def stream(self, reservation_status=values.unset, limit=None, page_size=None):
"""
Streams ReservationInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param ReservationInstance.Status reservation_status: Returns the list of reservations for a task with a specified ReservationStatus
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(reservation_status=reservation_status, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
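    # Illustrative usage (client wiring assumed, not shown in this file):
    # reservations = client.taskrouter.workspaces('WSxxx').tasks('WTxxx').reservations
    # for reservation in reservations.stream(limit=20):
    #     print(reservation.sid)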
def list(self, reservation_status=values.unset, limit=None, page_size=None):
"""
Lists ReservationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param ReservationInstance.Status reservation_status: Returns the list of reservations for a task with a specified ReservationStatus
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance]
"""
return list(self.stream(reservation_status=reservation_status, limit=limit, page_size=page_size, ))
def page(self, reservation_status=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of ReservationInstance records from the API.
Request is executed immediately
:param ReservationInstance.Status reservation_status: Returns the list of reservations for a task with a specified ReservationStatus
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
data = values.of({
'ReservationStatus': reservation_status,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return ReservationPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ReservationInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ReservationPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ReservationContext
:param sid: The SID of the TaskReservation resource to fetch
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ReservationContext
:param sid: The SID of the TaskReservation resource to fetch
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
"""
return ReservationContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Taskrouter.V1.ReservationList>'
class ReservationPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ReservationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param workspace_sid: The SID of the Workspace that this task is contained within.
:param task_sid: The SID of the reserved Task resource
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationPage
"""
super(ReservationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ReservationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationInstance
"""
return ReservationInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
            task_sid=self._solution['task_sid'],
        )
|
segler-alex/kodi-radio-browser
|
main.py
|
Python
|
gpl-3.0
| 10,729
| 0.005313
|
import sys
import urllib
import urllib2
import urlparse
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
import json
import base64
addonID = 'plugin.audio.radiobrowser'
addon = xbmcaddon.Addon(id=addonID)
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
xbmcplugin.setContent(addon_handle, 'songs')
my_stations = {}
profile = xbmc.translatePath(addon.getAddonInfo('profile')).decode("utf-8")
mystations_path = profile+'/mystations.json'
import socket
import random
def get_radiobrowser_base_urls():
"""
Get all base urls of all currently available radiobrowser servers
Returns:
list: a list of strings
"""
hosts = []
# get all hosts from DNS
ips = socket.getaddrinfo('all.api.radio-browser.info',
80, 0, 0, socket.IPPROTO_TCP)
for ip_tupple in ips:
ip = ip_tupple[4][0]
# do a reverse lookup on every one of the ips to have a nice name for it
host_addr = socket.gethostbyaddr(ip)
# add the name to a list if not already in there
if host_addr[0] not in hosts:
hosts.append(host_addr[0])
# sort list of names
random.shuffle(hosts)
# add "https://" in front to make it an url
xbmc.log("Found hosts: " + ",".join(hosts))
return list(map(lambda x: "https://" + x, hosts))
def LANGUAGE(id):
# return id
# return "undefined"
return addon.getLocalizedString(id).encode('utf-8')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
def addLink(stationuuid, name, url, favicon, bitrate):
li = xbmcgui.ListItem(name, iconImage=favicon)
li.setProperty('IsPlayable', 'true')
li.setInfo(type="Music", infoLabels={ "Title":name, "Size":bitrate})
localUrl = build_url({'mode': 'play', 'stationuuid': stationuuid})
if stationuuid in my_stations:
contextTitle = LANGUAGE(32009)
contextUrl = build_url({'mode': 'delstation', 'stationuuid': stationuuid})
else:
contextTitle = LANGUAGE(32010)
contextUrl = build_url({'mode': 'addstation', 'stationuuid': stationuuid, 'name': name.encode('utf-8'), 'url': url, 'favicon': favicon, 'bitrate': bitrate})
li.addContextMenuItems([(contextTitle, 'RunPlugin(%s)'%(contextUrl))])
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=False)
def downloadFile(uri, param):
"""
Download file with the correct headers set
Returns:
a string result
"""
paramEncoded = None
if param != None:
paramEncoded = json.dumps(param)
xbmc.log('Request to ' + uri + ' Params: ' + ','.join(param))
else:
xbmc.log('Request to ' + uri)
req = urllib2.Request(uri, paramEncoded)
req.add_header('User-Agent', 'KodiRadioBrowser/1.2.0')
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req)
data=response.read()
response.close()
return data
def downloadApiFile(path, param):
"""
Download file with relative url from a random api server.
Retry with other api servers if failed.
Returns:
a string result
"""
servers = get_radiobrowser_base_urls()
i = 0
for server_base in servers:
xbmc.log('Random server: ' + server_base + ' Try: ' + str(i))
uri = server_base + path
try:
data = downloadFile(uri, param)
return data
except Exception as e:
xbmc.log("Unable to download from api url: " + uri, xbmc.LOGERROR)
pass
i += 1
    return '[]'  # callers json-decode the result, so fail soft with an empty JSON list
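# Illustrative usage (same pattern as the '/json/tags' call below): fetch the
# 100 most-clicked stations from a random API server and decode the JSON list.
# data = downloadApiFile('/json/stations/topclick/100', None)
# stations = json.loads(data)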
def addPlayableLink(data):
dataDecoded = json.loads(data)
for station in dataDecoded:
        addLink(station['stationuuid'], station['name'], station['url'], station['favicon'], station['bitrate'])
def readFile(filepath):
with open(filepath, 'r') as read_file:
return json.load(read_file)
def writeFile(filepath, data):
with open(filepath, 'w') as write_file:
        return json.dump(data, write_file)
def addToMyStations(stationuuid, name, url, favicon, bitrate):
my_stations[stationuuid] = {'stationuuid': stationuuid, 'name': name, 'url': url, 'bitrate': bitrate, 'favicon': favicon}
writeFile(mystations_path, my_stations)
def delFromMyStations(stationuuid):
if stationuuid in my_stations:
del my_stations[stationuuid]
writeFile(mystations_path, my_stations)
xbmc.executebuiltin('Container.Refresh')
# create storage
if not xbmcvfs.exists(profile):
xbmcvfs.mkdir(profile)
if xbmcvfs.exists(mystations_path):
my_stations = readFile(mystations_path)
else:
writeFile(mystations_path, my_stations)
mode = args.get('mode', None)
if mode is None:
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/topclick/100'})
li = xbmcgui.ListItem(LANGUAGE(32000), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/topvote/100'})
li = xbmcgui.ListItem(LANGUAGE(32001), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/lastchange/100'})
li = xbmcgui.ListItem(LANGUAGE(32002), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'stations', 'url': '/json/stations/lastclick/100'})
li = xbmcgui.ListItem(LANGUAGE(32003), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'tags'})
li = xbmcgui.ListItem(LANGUAGE(32004), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'countries'})
li = xbmcgui.ListItem(LANGUAGE(32005), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'search'})
li = xbmcgui.ListItem(LANGUAGE(32007), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
localUrl = build_url({'mode': 'mystations'})
li = xbmcgui.ListItem(LANGUAGE(32008), iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'tags':
data = downloadApiFile('/json/tags', None)
dataDecoded = json.loads(data)
for tag in dataDecoded:
tagName = tag['name']
if int(tag['stationcount']) > 1:
try:
localUrl = build_url({'mode': 'stations', 'key': 'tag', 'value' : base64.b32encode(tagName.encode('utf-8'))})
li = xbmcgui.ListItem(tagName, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
except Exception as e:
                xbmc.log(str(e), xbmc.LOGERROR)
pass
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'countries':
data = downloadApiFile('/json/countries', None)
dataDecoded = json.loads(data)
for tag in dataDecoded:
countryName = tag['name']
if int(tag['stationcount']) > 1:
try:
localUrl = build_url({'mode': 'states', 'country': base64.b32encode(countryName.encode('utf-8'))})
li = xbmcgui.ListItem(countryName, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=localUrl, listitem=li, isFolder=True)
except Exception as e:
xbmc.log("Stationcount is not of type int", xbmc.LOGERROR)
pass
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'states':
country = args['country'][0]
country = base64.b32decode(country)
country = countr
|
chrisspen/homebot
|
src/test/detect_voice/sphinxtrain_test/samples/make_fileids.py
|
Python
|
mit
| 559
| 0.005367
|
#!/usr/bin/env python
import os, sys
def touch(path):
with open(path, 'a'):
os.utime(path, None)
fout = open('sample.fileids', 'wb')
fout2 = open('sample.transcription', 'wb')
for fn in sorted(os.listdir('.')):
if not fn.endswith('.wav'):
continue
base_fn = os.path.splitext(fn)[0]
txt_fn = base_fn + '.txt'
    touch(txt_fn)
text = open(txt_fn).read().strip()
if text and not text.startswith('#'):
fout.write('samples/%s\n' % base_fn)
fout2.write('<s> %s <s> (%s)\n' % (text, base_fn))
print 'Done.'
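# Example of the emitted lines for a sample 0001.wav whose 0001.txt contains
# "hello world":
#   sample.fileids:        samples/0001
#   sample.transcription:  <s> hello world <s> (0001)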
|
fau-fablab/kastenwesen
|
test_cron.py
|
Python
|
gpl-3.0
| 4,191
| 0.000954
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from cron_status import *
class TestChangeDetection(unittest.TestCase):
"""Test if the change detection is operational."""
# Please note that status_history_list is backwards,
# i.e., newest entry first.
def test_all_okay(self):
status_history_list = [
{'foo': (ContainerStatus.OKAY, 'no msg')}
] * (STATUS_HISTORY_LENGTH + 1)
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(changed, status[0].changed) # because there is only 1 container
self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
self.assertEqual(status[0].current_status, ContainerStatus.OKAY)
self.assertTrue(status[0].container_name in status_history_list[0])
self.assertEqual(status[0].current_msg, status_history_list[0][status[0].container_name][1])
def test_all_failed(self):
status_history_list = [
{'foo': (ContainerStatus.FAILED, 'no msg')}
] * (STATUS_HISTORY_LENGTH + 1)
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(changed, status[0].changed) # because there is only 1 container
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
self.assertEqual(status[0].current_status, ContainerStatus.FAILED)
def test_failed_after_starting_short(self):
status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
status_history_list += [
{'foo': (ContainerStatus.STARTING, 'no msg')}
] * (STATUS_HISTORY_LENGTH - 1)
status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_failed_after_starting_very_long(self):
status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
status_history_list += [
{'foo': (ContainerStatus.STARTING, 'no msg')}
] * STATUS_HISTORY_LENGTH
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_okay_after_failed(self):
status_history_list = [
{'foo': (ContainerStatus.OKAY, 'no msg')}
]
status_history_list += [
{'foo': (ContainerStatus.FAILED, 'no msg')}
] * STATUS_HISTORY_LENGTH
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
def test_failed_after_okay(self):
status_history_list = [
{'foo': (ContainerStatus.FAILED, 'no msg')}
]
status_history_list += [
{'foo': (ContainerStatus.OKAY, 'no msg')}
] * STATUS_HISTORY_LENGTH
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_missing_data(self):
status_history_list = [
{'foo': (ContainerStatus.FAILED, 'no msg')}
] * (STATUS_HISTORY_LENGTH - 1)
status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_too_much_data(self):
status_history_list = [
{'foo': (ContainerStatus.OKAY, 'no msg')}
] * (STATUS_HISTORY_LENGTH + 1)
status_history_list += [{'foo': (ContainerStatus.FAILED, 'no msg')}]
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
if __name__ == '__main__':
unittest.main()
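# Reading the fixtures above: status_history_list[0] is the *newest*
# snapshot, so test_okay_after_failed models a container that just recovered
# (OKAY now, FAILED for the previous STATUS_HISTORY_LENGTH runs).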
|
wiltonlazary/arangodb
|
3rdParty/V8/gyp/generator/eclipse.py
|
Python
|
apache-2.0
| 16,471
| 0.010685
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# # Copy additional generator configuration data from VS, which is shared by the Windows Ninja generator.
# import gyp.generator.msvs as msvs_generator
# generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', [])
# generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dir's specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if 'include_dirs' in config:
include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, _, config_name, params, compiler_path):
"""Calculate the defines for a project.
Returns:
A dict that includes explict defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
      config = target['configurations'][config_name]
|
flowersteam/SESM
|
SESM/scene.py
|
Python
|
gpl-3.0
| 953
| 0.005247
|
import time
import threading
import subprocess
import pygame.locals
vlc_path = 'C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe'
class Scene(threading.Thread):
def __init__(self, screen, games, games_manager):
threading.Thread.__init__(self)
self.screen = screen
self.games = games
        self.games_manager = games_manager
class DummyScene(Scene):
def run(self):
time.sleep(5)
class VideoScene(Scene):
def __init__(self, screen, games, games_manager, filename):
Scene.__init__(self, screen, games, games_manager)
self.filename = filename
def run(self):
subprocess.call([vlc_path, self.filename, '--play-and-exit', '--fullscreen'])#, shell=True)
if __name__ == '__main__':
import pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((700, 800))
s = VideoScene(screen, None, None, 'videos/p2.mpg')
s.start()
s.join()
|
Baguage/django-google-analytics-id
|
tests/test_settings.py
|
Python
|
mit
| 612
| 0.001634
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'fake-key'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'analytics',
'tests'
]
ROOT_URLCONF = 'tests.urls'
TEMPLATES = [
{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
|
jenniferwx/Programming_Practice
|
Permutations.py
|
Python
|
bsd-3-clause
| 365
| 0.016438
|
'''
Generate all permutations of a given string
'''
def permutations(str):
if(len(str)==1):
return str[0]
permutation = permutations(str[1:])
word = str[0]
result = []
for perm in permutation:
for i in range(len(perm)+1): # note: len+1
result.append(perm[:i]+word+perm[i:])
return result
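# Example: permutations('abc') returns
# ['abc', 'bac', 'bca', 'acb', 'cab', 'cba']
# ('a' is spliced into every position of each permutation of 'bc').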
|
imxiaohui/django-todolist-1
|
lists/migrations/0003_auto_20150308_2120.py
|
Python
|
mit
| 1,229
| 0.001627
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_auto_20150301_1937'),
]
operations = [
migrations.AlterModelOptions(
name='todo',
options={'ordering': ('created_at',)},
),
migrations.AlterModelOptions(
name='todolist',
options={'ordering': ('created_at',)},
),
migrations.AlterField(
model_name='todo',
name='creator',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, related_name='todos'),
preserve_default=True,
),
migrations.AlterField(
model_name='todo',
name='todolist',
field=models.ForeignKey(to='lists.TodoList', related_name='todos'),
preserve_default=True,
),
        migrations.AlterField(
            model_name='todolist',
name='creator',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, related_name='todolists'),
preserve_default=True,
),
]
|
williamleif/histwords
|
representations/cooccurgen.py
|
Python
|
apache-2.0
| 944
| 0.002119
|
from collections import Counter
import numpy as np
def run(word_gen, index, window_size, out_file):
context = []
pair_counts = Counter()
for word in word_gen:
context.append(index[word])
if len(context) > window_size * 2 + 1:
context.pop(0)
pair_counts = _process_context(context, pair_counts, window_size)
import pyximport
pyximport.install(setup_args={
"include_dirs": np.get_include()})
from representations import sparse_io
sparse_io.export_mat_from_dict(pair_counts, out_file)
def _process_context(context, pair_counts, window_size):
    if len(context) < window_size + 1:
return pair_counts
target = context[window_size]
indices = range(0, window_size)
indices.extend(range(window_size + 1, 2 * window_size + 1))
for i in indices:
if i >= len(context):
break
pair_counts[(target, context[i])] += 1
return pair_counts
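# Worked example: with window_size=1 and context [5, 7, 9], the target is
# context[1] == 7 and each neighbour is counted once:
# _process_context([5, 7, 9], Counter(), 1)
# -> Counter({(7, 5): 1, (7, 9): 1})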
|
osiloke/Flumotion-Transcoder
|
flumotion/transcoder/admin/transbalancer.py
|
Python
|
lgpl-2.1
| 4,755
| 0.001682
|
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from zope.interface import Interface, implements
from flumotion.transcoder.admin import admintask
class ITranscoderBalancerListener(Interface):
def onSlotsAvailable(self, balancer, count):
pass
class TranscoderBalancerListener(object):
implements(ITranscoderBalancerListener)
def onSlotsAvailable(self, balancer, count):
pass
class TranscoderBalancer(object):
"""
Handle the distribution of transcoding tasks to a set of workerPxy.
"""
def __init__(self, listener=None):
self._listener = listener
self._workerTasks = {} # {workerPxy: [task]}
self._orphanes = []
self._current = 0
self._maximum = 0
## Public Methods ##
def getAvailableSlots(self):
return max(self._maximum - self._current, 0)
def clearTasks(self):
self._current = 0
del self._orphanes[:]
for tasks in self._workerTasks.itervalues():
del tasks[:]
def addWorker(self, workerPxy):
assert not (workerPxy in self._workerTasks)
self._workerTasks[workerPxy] = []
self._maximum += workerPxy.getWorkerContext().getMaxTask()
def removeWorker(self, workerPxy):
assert workerPxy in self._workerTasks
self._maximum -= workerPxy.getWorkerContext().getMaxTask()
self._orphanes.extend(self._workerTasks[workerPxy])
del self._workerTasks[workerPxy]
def addTask(self, task, workerPxy=None):
assert admintask.IAdminTask.providedBy(task)
assert (workerPxy == None) or (workerPxy in self._workerTasks)
self._current += 1
if workerPxy:
max = workerPxy.getWorkerContext().getMaxTask()
curr = len(self._workerTasks[workerPxy])
if max > curr:
self._workerTasks[workerPxy].append(task)
task.suggestWorker(workerPxy)
return
self._orphanes.append(task)
def removeTask(self, task):
assert admintask.IAdminTask.providedBy(task)
if task in self._orphanes:
self._orphanes.remove(task)
self._current -= 1
return
for tasks in self._workerTasks.itervalues():
if task in tasks:
tasks.remove(task)
self._current -= 1
return
def balance(self):
def getSortedWorkers():
"""
Return all the workers with at least 1 free slot
with the ones with the most free slots first.
"""
lookup = dict([(w, float(len(t)) / w.getWorkerContext().getMaxTask())
for w, t in self._workerTasks.items()
if len(t) < w.getWorkerContext().getMaxTask()])
workerPxys = lookup.keys()
workerPxys.sort(key=lookup.get)
return workerPxys
if self._workerTasks:
# First remove the exceding tasks
for workerPxy, tasks in self._workerTasks.iteritems():
max = workerPxy.getWorkerContext().getMaxTask()
if len(tasks) > max:
diff = len(tasks) - max
oldTasks = tasks[diff:]
del tasks[diff:]
self._orphanes.extend(oldTasks)
for task in oldTasks:
task.suggestWorker(None)
# Then distribute the orphanes until there is
# no more free slots or no more orphane tasks
while True:
workerPxys = getSortedWorkers()
if not workerPxys: break
for workerPxy in workerPxys:
if not self._orphanes: break
tasks = self._workerTasks[workerPxy]
task = self._orphanes.pop()
tasks.append(task)
task.suggestWorker(workerPxy)
else:
continue
break
available = self.getAvailableSlots()
if self._listener and (available > 0):
self._listener.onSlotsAvailable(self, available)
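# Worked example: with two workers of max 2 tasks each, A holding 2 tasks
# (load 1.0) and B holding 0 (load 0.0), getSortedWorkers() returns [B];
# A is full and is excluded, so an orphaned task is suggested to B first.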
|
pombredanne/pythran
|
pythran/tests/test_spec_parser.py
|
Python
|
bsd-3-clause
| 3,994
| 0.008763
|
import unittest
import pythran
import os.path
#pythran export a((float,(int,uintp),str list) list list)
#pythran export a(str)
#pythran export a( (str,str), int, intp list list)
#pythran export a( float set )
#pythran export a( bool:str dict )
#pythran export a( float )
#pythran export a( int8[] )
#pythran export a( int8[][] order (F))
#pythran export a( byte )
#pythran export a0( uint8 )
#pythran export a1( int16 )
#pythran export a2( uint16 )
#pythran export a3( int32 )
#pythran export a4( uint32 )
#pythran export a5( int64 )
#pythran export a6( uint64 )
#pythran export a7( float32 )
#pythran export a8( float64 )
#pythran export a9( complex64 )
#pythran export a10( complex128 )
#pythran export a( int8 set )
#pythran export b( int8 set? )
#pythran export a( uint8 list)
#pythran export a( int16 [], slice)
#pythran export a( uint16 [][] order(C))
#pythran export a( uint16 [::][])
#pythran export a( uint16 [:,:,:])
#pythran export a( uint16 [:,::,:])
#pythran export a( uint16 [,,,,])
#pythran export a( (int32, ( uint32 , int64 ) ) )
#pythran export a( uint64:float32 dict )
#pythran export a( float64, complex64, complex128 )
class TestSpecParser(unittest.TestCase):
def test_parser(self):
real_path = os.path.splitext(os.path.realpath(__file__))[0]+".py"
with open(real_path) as fd:
print(pythran.spec_parser(fd.read()))
def test_invalid_specs0(self):
code = '#pythran export foo()\ndef foo(n): return n'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_invalid_specs1(self):
code = '#pythran export boo(int)\ndef boo(): pass'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_invalid_specs2(self):
code = '#pythran export bar(int)\ndef foo(): pass'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_invalid_specs3(self):
code = '#pythran export bar(int, int?, int)\ndef bar(x, y=1, z=1): pass'
with self.assertRaises(pythran.syntax.PythranSyntaxError):
pythran.compile_pythrancode("dumber", code)
def test_multiline_spec0(self):
code = '''
#pythran export foo(
# )
def foo(): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_multiline_spec1(self):
code = '''
#pythran export foo(int
#, int
# )
def foo(i,j): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_multiline_spec2(self):
code = '''
# pythran export foo(int,
# float
#, int
# )
def foo(i,j,k): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_crappy_spec0(self):
code = '''
# pythran export soo(int) this is an int test
def soo(i): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_crappy_spec1(self):
code = '''
# pythran export poo(int)
#this is a pythran export test
def poo(i): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_middle_spec0(self):
code = '''
def too(i): return
# pythran export too(int)
#this is a pythran export test
def bar(i): return
'''
self.assertTrue(pythran.spec_parser(code))
def test_middle_spec1(self):
code = '''
def zoo(i): return
#this is a pythran export test
# pythran export zoo(int)
#this is an export test
# pythran export zoo(str)
def bar(i): return
'''
self.assertEquals(len(pythran.spec_parser(code).functions), 1)
self.assertEquals(len(pythran.spec_parser(code).functions['zoo']), 2)
def test_var_export0(self):
code = '''
# pythran export coo
coo = 1
'''
self.assertTrue(pythran.spec_parser(code))
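# A minimal usage sketch of the API exercised above (standalone, names as
# in this file): spec_parser accepts raw source text and returns an object
# whose .functions dict maps each exported name to its overload signatures,
# as test_middle_spec1 relies on.
def demo_spec_parser():
    specs = pythran.spec_parser('#pythran export foo(int)\ndef foo(n): return n')
    assert 'foo' in specs.functions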
|
racker/cloud-init-debian-pkg
|
cloudinit/config/cc_puppet.py
|
Python
|
gpl-3.0
| 5,166
| 0.000387
|
# vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
import os
import socket
from cloudinit import helpers
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
util.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warn(("Sorry we do not know how to enable"
" puppet services on this system"))
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
if 'puppet' not in cfg:
log.debug(("Skipping module named %s,"
" no 'puppet' configuration found"), name)
return
puppet_cfg = cfg['puppet']
# Start by installing the puppet package if necessary...
install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
version = util.get_cfg_option_str(puppet_cfg, 'version', None)
if not install and version:
log.warn(("Puppet install set false but version supplied,"
" doing nothing."))
elif install:
log.debug(("Attempting to install puppet %s,"),
version if version else 'latest')
cloud.distro.install_packages(('puppet', version))
# ... and then update the puppet configuration
if 'conf' in puppet_cfg:
# Add all sections from the conf object to puppet.conf
contents = util.load_file(PUPPET_CONF_PATH)
# Create object for reading puppet.conf values
puppet_config = helpers.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to
# mix the rest up. First clean them up
# (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents),
filename=PUPPET_CONF_PATH)
for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
# Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place
if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
util.ensure_dir(PUPPET_SSL_DIR, 0771)
util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
util.ensure_dir(PUPPET_SSL_CERT_DIR)
util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
else:
                # Iterate through the config items; we'll use ConfigParser.set
# to overwrite or create new items as needed
for (o, v) in cfg.iteritems():
if o == 'certname':
# Expand %f as the fqdn
# TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
# Expand %i as the instance id
v = v.replace("%i", cloud.get_instance_id())
# certname needs to be downcased
v = v.lower()
puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
        util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
# Set it up so it autostarts
_autostart_puppet(log)
# Start puppetd
util.subp(['service', 'puppet', 'start'], capture=False)
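# A minimal sketch of the configuration shape handle() consumes (values
# below are illustrative assumptions, not defaults): sections under 'conf'
# are merged into puppet.conf, except the special 'ca_cert' entry, which
# is written to PUPPET_SSL_CERT_PATH instead.
EXAMPLE_PUPPET_CFG = {
    'puppet': {
        'install': True,
        'version': None,  # None installs the latest available version
        'conf': {
            'agent': {
                'server': 'puppetmaster.example.org',  # hypothetical host
                'certname': '%i.%f',  # %i -> instance id, %f -> fqdn
            },
        },
    },
}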
|
canvasnetworks/canvas
|
website/apps/activity/jinja_tags.py
|
Python
|
bsd-3-clause
| 444
| 0.009009
|
from jinja2 import Markup, contextfunction
from canvas.templatetags.jinja_base import (global_tag, filter_tag, render_jinja_to_string,
jinja_context_tag, update_context)
@global_tag
def activity_stream_item(activity, viewer):
ctx = {
'activity': activity,
'viewer': viewer,
}
return Markup(render_jinja_to_string(u'activity/types/{0}.html'.format(activity.TYPE), ctx))
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/autotvm/__init__.py
|
Python
|
apache-2.0
| 794
| 0.001259
|
"""The auto-tuning module of tvm
This module includes:
* Tuning space definition API
* Efficient auto-tuners
* Tuning result and database support
* Distributed measurement to scale up tuning
"""
from . import database
from . import feature
from . import measure
from . import record
from . import task
from . import tuner
from . import util
from . import env
from . import tophub
# some shortcuts
from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \
    LocalBuilder, LocalRunner, RPCRunner
from .tuner import callback
from .task import template, get_config, create, ConfigSpace, ConfigEntity, \
register_topi_compute, register_topi_schedule, \
DispatchContext, FallbackContext, ApplyHistoryBest as apply_history_best
from .env import GLOBAL_SCOPE
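# A minimal usage sketch of the shortcuts re-exported above (assuming the
# standard local measurement flow; tuner setup omitted): build candidates
# on this machine and time each one five times.
def _example_measure_option():
    return measure_option(builder=LocalBuilder(), runner=LocalRunner(number=5))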
|
JIMyungSik/uftrace
|
tests/t145_longjmp3.py
|
Python
|
gpl-2.0
| 1,447
| 0.001382
|
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'longjmp3', """
# DURATION TID FUNCTION
1.164 us [ 4107] | __monstartup();
0.657 us [ 4107] | __cxa_atexit();
[ 4107] | main() {
0.705 us [ 4107] | _setjmp() = 0;
1.823 us [ 4107] | getpid();
0.182 us [ 4107] | _setjmp() = 0;
[ 4107] | foo() {
[ 4107] | __longjmp_chk(1) {
8.790 us [ 4107] | } = 1; /* _setjmp */
   0.540 us [ 4107] |   getpid();
[ 4107] | bar() {
[ 4107] | baz() {
[ 4107] | __longjmp_chk(2) {
1.282 us [ 4107] | } = 2; /* _setjmp */
0.540 us [ 4107] | getpid();
[ 4107] | foo() {
[ 4107] | __longjmp_chk(3) {
0.578 us [ 4107] | } = 3; /* _setjmp */
[ 4107] | bar() {
            [ 4107] |     baz() {
[ 4107] | __longjmp_chk(4) {
0.642 us [ 4107] | } = 4; /* _setjmp */
18.019 us [ 4107] | } /* main */
""")
def build(self, name, cflags='', ldflags=''):
return TestBase.build(self, name, cflags + ' -D_FORTIFY_SOURCE=2', ldflags)
def runcmd(self):
args = '-A .?longjmp@arg2 -R .?setjmp@retval'
return '%s %s %s' % (TestBase.ftrace, args, 't-' + self.name)
def fixup(self, cflags, result):
return result.replace('__longjmp_chk', "longjmp")
|
hltbra/openxmllib
|
tests/test_wordprocessing.py
|
Python
|
gpl-2.0
| 1,363
| 0.003679
|
# -*- coding: utf-8 -*-
"""
Testing WordProcessingDocument
"""
# $Id: test_wordprocessing.py 6355 2007-09-20 17:16:21Z glenfant $
import unittest
import os
from fixures import *
import openxmllib
class WordProcessingTest(unittest.TestCase):
"""Testing querying properties from a document"""
def setUp(self):
test_file_path = os.path.join(TEST_FILES_IN, ALL_IN_FILES[0])
self.doc = openxmllib.openXmlDocument(test_file_path)
return
def test_indexableText(self):
"""Indexable text with properties"""
itext = self.doc.indexableText()
some_words = (u'A', u'full', u'chàractèrs', u'non', u'custom_value_2', u'title')
for word in some_words:
self.failUnless(word in itext, "%s was expected" % word)
return
def test_indexableTextNoprop(self):
"""Indexable text without properties"""
itext = self.doc.indexableText(include_properties=False)
some_words = (u'A', u'full', u'chàractèrs', u'non')
for word in some_words:
            self.failUnless(word in itext, "%s was expected" % word)
return
# /class WordProcessingTest
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WordProcessingTest))
return suite
if __name__ == '__main__':
unittest.TextTestRunner().run(test_suite())
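# A minimal usage sketch of the API under test (standalone; the file name
# is a hypothetical example): open an OOXML document and pull the text an
# indexer would see, without the document properties.
def demo_indexable_text():
    doc = openxmllib.openXmlDocument('report.docx')
    return doc.indexableText(include_properties=False)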
|
zeke8402/codingame-puzzles
|
medium/apu-init-phase/solution.py
|
Python
|
mit
| 1,508
| 0.002653
|
import sys
import math
# Don't let the machines win. You are humanity's last hope...
width = int(input()) # the number of cells on the X axis
height = int(input()) # the number of cells on the Y axis
Matrix = [[0 for x in range(height)] for x in range(width)]
# Creating the matrix
for i in range(height):
line = input() # width characters, each either 0 or .
for j in range(width):
Matrix[j][i] = line[j]
cArray = []
for i in range(width):
for j in range(height):
coordinates = ""
if Matrix[i][j] == '0':
coordinates = coordinates + str(i)+" "+str(j)+" "
# Find Next Node to the Right
k = i+1
print("k is "+str(k), file=sys.stderr)
while k != width+1:
if k != width:
if Matrix[k][j] == '0':
                        coordinates = coordinates + str(k)+" "+str(j)+" "
break
else:
coordinates = coordinates + "-1 -1 "
k += 1
# Find Next Node to the Bottom
k = j+1
while k != height+1:
if k != height:
if Matrix[i][k] == '0':
                        coordinates = coordinates + str(i)+" "+str(k)+" "
break
else:
coordinates = coordinates + "-1 -1 "
k += 1
cArray.append(coordinates)
for c in cArray:
print(c)
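# An equivalent compact sketch of the rightward neighbour scan above
# (same conventions: '0' in Matrix marks a node): return the first node
# after column i in row j, or (-1, -1) when none follows.
def next_right(i, j):
    for k in range(i + 1, width):
        if Matrix[k][j] == '0':
            return (k, j)
    return (-1, -1)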
|
Forage/Gramps
|
gramps/gui/editors/editpersonref.py
|
Python
|
gpl-2.0
| 6,336
| 0.004735
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
The EditPersonRef module provides the EditPersonRef class. This provides a
mechanism for the user to edit person reference information.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
from .editsecondary import EditSecondary
from gramps.gen.lib import NoteType
from ..widgets import MonitoredEntry, PrivacyButton
from ..selectors import SelectorFactory
from .displaytabs import CitationEmbedList, NoteTab
from ..glade import Glade
#-------------------------------------------------------------------------
#
# EditPersonRef class
#
#-------------------------------------------------------------------------
class EditPersonRef(EditSecondary):
"""
Displays a dialog that allows the user to edit a person reference.
"""
def __init__(self, dbstate, uistate, track, addr, callback):
"""
Displays the dialog box.
parent - The class that called the PersonRef editor.
        addr - The person reference that is to be edited
"""
EditSecondary.__init__(self, dbstate, uistate, track, addr, callback)
def _local_init(self):
self.width_key = 'interface.person-ref-width'
self.height_key = 'interface.person-ref-height'
self.top = Glade()
self.set_window(self.top.toplevel,
self.top.get_object("title"),
_('Person Reference Editor'))
self.person_label = self.top.get_object('person')
def _setup_fields(self):
if self.obj.ref:
p = self.dbstate.db.get_person_from_handle(self.obj.ref)
self.person_label.set_text(name_displayer.display(p))
self.street = MonitoredEntry(
self.top.get_object("relationship"),
self.obj.set_relation,
self.obj.get_relation,
self.db.readonly)
self.priv = PrivacyButton(
self.top.get_object("private"),
self.obj,
self.db.readonly)
def _connect_signals(self):
#self.define_help_button(self.top.get_object('help'))
self.define_cancel_button(self.top.get_object('cancel'))
self.define_ok_button(self.top.get_object('ok'),self.save)
self.top.get_object('select').connect('clicked',self._select_person)
def _connect_db_signals(self):
"""
Connect any signals that need to be connected.
Called by the init routine of the base class (_EditPrimary).
"""
self._add_db_signal('person-rebuild', self.close)
self._add_db_signal('person-delete', self.check_for_close)
def check_for_close(self, handles):
"""
Callback method for delete signals.
If there is a delete signal of the primary object we are editing, the
editor (and all child windows spawned) should be closed
"""
if self.obj.ref in handles:
self.close()
def _select_person(self, obj):
SelectPerson = SelectorFactory('Person')
sel = SelectPerson(self.dbstate, self.uistate, self.track)
person = sel.run()
if person:
self.obj.ref = person.get_handle()
self.person_label.set_text(name_displayer.display(person))
def _create_tabbed_pages(self):
"""
Create the notebook tabs and inserts them into the main
window.
"""
notebook = Gtk.Notebook()
self.srcref_list = CitationEmbedList(self.dbstate, self.uistate,
self.track,
self.obj.get_citation_list())
self._add_tab(notebook, self.srcref_list)
self.track_ref_for_deletion("srcref_list")
self.note_tab = NoteTab(self.dbstate, self.uistate, self.track,
self.obj.get_note_list(),
notetype=NoteType.ASSOCIATION)
        self._add_tab(notebook, self.note_tab)
self.track_ref_for_deletion("note_tab")
self._setup_notebook_tabs(notebook)
notebook.show_all()
self.top.get_object('vbox').pack_start(notebook, True, True, 0)
def build_menu_names(self, obj):
return (_('Person Reference'),_('Person Reference Editor'))
def save(self,*obj):
"""
Called when the OK button is pressed. Gets data from the
form and updates the Address data structure.
"""
if self.obj.ref:
if self.callback:
self.callback(self.obj)
self.callback = None
self.close()
else:
from ..dialog import ErrorDialog
ErrorDialog(
_('No person selected'),
_('You must either select a person or Cancel '
'the edit'))
|
SeungGiJeong/SK_FastIR
|
memory/windows2012ServerR2Memory.py
|
Python
|
gpl-3.0
| 440
| 0.002273
|
from __future__ import unicode_literals
from memory.mem import _Memory
class Windows2012ServerR2Memory(_Memory):
def __init__(self, params):
super(Windows2012ServerR2Memory, self).__init__(params)
def csv_all_modules_dll(self):
super(Windows2012ServerR2Memory, self)._csv_all_modules_dll()
def csv_all_modules_opened_files(self):
super(Windows2012ServerR2Memory, self)._csv_all_modules_opened_files()
|
wrobell/geocoon
|
geocoon/tests/test_core.py
|
Python
|
gpl-3.0
| 9,461
| 0.003065
|
#
# GeoCoon - GIS data analysis library based on Pandas and Shapely
#
# Copyright (C) 2014 by Artur Wroblewski <wrobell@pld-linux.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
GeoCoon core unit tests.
"""
import pandas
from shapely.geometry import Point, LineString, Polygon
from geocoon.core import GeoDataFrame, PointSeries, LineStringSeries, \
PolygonSeries, fetch_attr
from geocoon.meta import META_POINT, META_LINE_STRING, META_POLYGON
import unittest
class GeoDataFrameTestCase(unittest.TestCase):
"""
Basic GIS data frame tests.
"""
def test_dict_constructor(self):
"""
Test GIS data frame constructor with dictionary
"""
data = [Point(v, v * 2) for v in [1, 2]]
series = PointSeries(data)
df = GeoDataFrame({'a': series})
self.assertEqual(PointSeries, type(df.a))
def test_assign_new_col(self):
"""
Test assigning GIS series as column to GIS data frame
"""
data = [Point(v, v * 2) for v in [1, 2]]
series = PointSeries(data)
df = GeoDataFrame({})
df['a'] = series
self.assertEqual(PointSeries, type(df.a))
def test_grouping(self):
"""
Test GIS data frame grouping
"""
data = [Point(v, v * 2) for v in range(5)]
series = PointSeries(data)
data = {
'a': series,
'b': [4, 5, 5, 4, 5],
}
df = GeoDataFrame(data)
gdf = df.groupby('b')
df = gdf.get_group(4)
self.assertEqual(PointSeries, type(df.a))
self.assertTrue(all([0, 3] == df.a.x))
self.assertTrue(all([0, 6] == df.a.y))
df = gdf.get_group(5)
self.assertEqual(PointSeries, type(df.a))
self.assertTrue(all([1, 2, 4] == df.a.x))
self.assertTrue(all([2, 4, 8] == df.a.y))
def test_select(self):
"""
Test selecting from GIS data frame
"""
data = [Point(v, v * 2) for v in range(5)]
series = PointSeries(data)
data = {
'a': series,
'b': [4, 5, 5, 4, 5],
}
df = GeoDataFrame(data)
df = df[df.b == 4]
self.assertEqual(PointSeries, type(df.a))
self.assertTrue(all([4] * 2 == df.b))
class GeoSeriesTestCase(unittest.TestCase):
"""
Basic GIS series tests.
"""
def test_create(self):
"""
Test GIS series creation
"""
data = [Point(v, v * 2) for v in [1, 2]]
series = PointSeries(data)
        self.assertEqual(PointSeries, type(series))
def test_fetch_attr(self):
"""
        Test fetch GIS properties from GIS series
"""
data = [Point(v, v * 2) for v in [1, 2]]
series = PointSeries(data)
y = fetch_attr(series, name='y')
self.assertTrue(all(y == [2, 4]))
def test_select(self):
"""
Test selecting from GIS series
"""
data = [Point(v, v * 2) for v in [1, 2, 3, 4, 5, 6]]
series = PointSeries(data)
sub = series[(series.x < 4) & (series.y > 2)]
self.assertEqual(PointSeries, type(sub))
self.assertTrue(all([2, 3] == sub.x))
self.assertTrue(all([4, 6] == sub.y))
def test_select_single(self):
"""
Test selecting single GIS object
"""
data = [Point(v, v * 2) for v in [1, 2, 3, 4, 5, 6]]
series = PointSeries(data)
p = series[1]
self.assertEqual(Point, type(p))
def test_slice(self):
"""
Test slicing GIS series
"""
data = [Point(v, v * 2) for v in [1, 2, 3, 4, 5, 6]]
series = PointSeries(data)
sub = series[:3]
self.assertEqual(PointSeries, type(sub))
self.assertTrue(all([1, 2, 3] == sub.x))
self.assertTrue(all([2, 4, 6] == sub.y))
class PointSeriesTestCase(unittest.TestCase):
"""
Point GIS series unit tests.
"""
def test_property_adapt(self):
"""
Test adaptation of point properties
"""
data = [Point(v, v * 2, v * 3) for v in [5, 2, 4]]
series = PointSeries(data)
attrs = (k for k, v in META_POINT.items() if v.is_property)
for attr in attrs:
value = getattr(series, attr) # no error? good
self.assertEqual(3, len(value))
def test_method_adapt_buffer(self):
"""
Test adaptation of point buffer method
"""
data = [Point(v, v * 2, v * 3) for v in [5, 2, 4]]
series = PointSeries(data)
value = series.buffer(0.2, resolution=3) # no error? good
self.assertEqual(3, len(value))
self.assertEqual(PolygonSeries, type(value))
def test_method_adapt_geom(self):
"""
Test adaptation of point methods (first param is geometry)
"""
p1 = [Point(v, v * 2, v * 3) for v in [5, 2, 4]]
p2 = [Point(v, v * 2, v * 3) for v in [5, 2]] + [Point(4.1, 1, 1)]
s1 = PointSeries(p1)
s2 = PointSeries(p2)
methods = (k for k, v in META_POINT.items() if v.first_is_geom)
for method in methods:
mcall = getattr(s1, method) # no error? good
value = mcall(s2)
self.assertEqual(3, len(value))
self.assertTrue(all(not callable(v) for v in value))
# just in case
value = s1.equals(s2)
self.assertTrue(all([True, True, False] == value), value)
class LineStringSeriesTestCase(unittest.TestCase):
"""
Line string GIS series unit tests.
"""
def test_property_adapt(self):
"""
Test adaptation of line string properties
"""
d1 = tuple((v, v * 1, v * 4) for v in (5, 2, 4))
d2 = tuple((v, v * 2, v * 5) for v in [5, 2, 4])
d3 = tuple((v, v * 3, v * 6) for v in [5, 2, 4])
l1 = LineString(d1)
l2 = LineString(d2)
l3 = LineString(d3)
series = LineStringSeries([l1, l2, l3])
attrs = (k for k, v in META_LINE_STRING.items() if v.is_property)
for attr in attrs:
value = getattr(series, attr) # no error? good
self.assertEqual(3, len(value))
self.assertTrue(all(not callable(v) for v in value))
def test_method_adapt(self):
"""
Test adaptation of line string methods
"""
d1 = tuple((v, v * 1, v * 4) for v in (5, 2, 4))
d2 = tuple((v, v * 2, v * 5) for v in [5, 2, 4])
d3 = tuple((v, v * 3, v * 6) for v in [5, 2, 4])
d4 = tuple((v, v * 4, v * 7) for v in [5, 2, 4])
l1 = LineString(d1)
l2 = LineString(d2)
l3 = LineString(d3)
l4 = LineString(d4)
s1 = LineStringSeries([l1, l2, l3])
s2 = LineStringSeries([l2, l3, l4])
methods = (k for k, v in META_LINE_STRING.items() if v.first_is_geom)
for method in methods:
mcall = getattr(s1, method) # no error? good
value = mcall(s2)
self.assertEqual(3, len(value))
self.assertTrue(all(not callable(v) for v in value))
class PolygonSeriesTestCase(unittest.TestCase):
"""
Polygon GIS series unit tests.
"""
def test_property_adapt(self):
"""
Test adaptation of polygon properties
"""
poly = lambda v: Polygon(((v, v), (v + 0.1, v), (v + 0.2, v + 0.2), (v, v)))
data = [poly(v) for v in [5, 2, 4]]
series = PolygonSeries(data)
attrs = (k for k, v in META_POLYGON.items() if v.is_property)
for attr in attrs:
            value = getattr(series, attr)  # no error? good
            self.assertEqual(3, len(value))
|
camptocamp/QGIS
|
python/plugins/processing/saga/SplitRGBBands.py
|
Python
|
gpl-2.0
| 3,715
| 0.005922
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SplitRGBBands.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.tools.system import *
from processing.tools import dataobjects
from processing.saga.SagaUtils import SagaUtils
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4 import QtGui
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.outputs.OutputRaster import OutputRaster
import os
class SplitRGBBands(GeoAlgorithm):
INPUT = "INPUT"
R = "R"
G = "G"
B = "B"
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__) + "/../images/saga.png")
def defineCharacteristics(self):
self.name = "Split RGB bands"
self.group = "Grid - Tools"
self.addParameter(ParameterRaster(SplitRGBBands.INPUT, "Input layer", False))
self.addOutput(OutputRaster(SplitRGBBands.R, "Output R band layer"))
self.addOutput(OutputRaster(SplitRGBBands.G, "Output G band layer"))
self.addOutput(OutputRaster(SplitRGBBands.B, "Output B band layer"))
def processAlgorithm(self, progress):
#TODO:check correct num of bands
input = self.getParameterValue(SplitRGBBands.INPUT)
temp = getTempFilename(None).replace('.','');
basename = os.path.basename(temp)
validChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
safeBasename = ''.join(c for c in basename if c in validChars)
temp = os.path.join(os.path.dirname(temp), safeBasename)
r = self.getOutputValue(SplitRGBBands.R)
g = self.getOutputValue(SplitRGBBands.G)
b = self.getOutputValue(SplitRGBBands.B)
commands = []
if isWindows():
commands.append("io_gdal 0 -GRIDS \"" + temp + "\" -FILES \"" + input+"\"")
commands.append("io_gdal 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\"");
commands.append("io_gdal 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\"");
commands.append("io_gdal 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\"");
else:
commands.append("libio_gdal 0 -GRIDS \"" + temp + "\" -FILES \"" + input + "\"")
commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0001.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + r + "\"");
commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0002.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + g + "\"");
commands.append("libio_gdal 1 -GRIDS \"" + temp + "_0003.sgrd\" -FORMAT 1 -TYPE 0 -FILE \"" + b + "\"");
SagaUtils.createSagaBatchJobFileFromSagaCommands(commands)
SagaUtils.executeSaga(progress);
|
AndrewWalker/ccsyspath
|
ccsyspath/paths.py
|
Python
|
mit
| 1,112
| 0.005396
|
import subprocess
from subprocess import Popen, PIPE
import os
import re
def compiler_preprocessor_verbose(compiler, extraflags):
"""Capture the compiler preprocessor stage in verbose mode
"""
lines = []
with open(os.devnull, 'r') as devnull:
cmd = [compiler, '-E']
cmd += extraflags
cmd += ['-', '-v']
p = Popen(cmd, stdin=devnull, stdout=PIPE, stderr=PIPE)
p.wait()
p.stdout.close()
lines = p.stderr.read()
lines = lines.decode('utf-8')
lines = lines.splitlines()
return lines
def system_include_paths(compiler, cpp=True):
extraflags = []
if cpp:
extraflags = '-x c++'.split()
lines = compiler_preprocessor_verbose(compiler, extraflags)
    lines = [line.strip() for line in lines]
start = lines.index('#include <...> search starts here:')
end = lines.index('End of search list.')
lines = lines[start+1:end]
paths = []
for line in lines:
        line = line.replace('(framework directory)', '')
line = line.strip()
paths.append(line)
return paths
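# A minimal usage sketch, assuming a clang or gcc driver is on PATH:
# print the compiler's C++ system include search directories.
if __name__ == '__main__':
    for path in system_include_paths('clang++', cpp=True):
        print(path)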
|
Juanlu001/pfc
|
demo/aquaclient.py
|
Python
|
gpl-3.0
| 883
| 0.002265
|
# coding: utf-8
"""Client program.
How to collect data:
1. Positions received as a list of points
2. Interpolate function (probably unstructured grid, see scipy.interpolate.griddata)
3. Evaluate function on points coming from the FEniCS mesh
4. Restore the values onto the array of a FEniCS function in the proper order
Hint: http://fenicsproject.org/qa/3975/interpolating-vector-function-from-python-code-to-fenics#a3976
fe.vector()[V.dofmap().dofs()] = f(x, y)
"""
import sys
import zmq
import pprint
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5556")
print("Collecting data...")
dt = 0.1 # s
t_aim = 0.0 # s
# Collect all
while True:
# We first initialize the server
socket.send_pyobj({'time': t_aim})
t_aim += dt
data = socket.recv_pyobj()
print(data['time'], data.keys())
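# A minimal sketch of step 2 of the plan in the module docstring
# (illustrative arrays, not part of the original client): interpolate
# scattered point values onto a query point with scipy's griddata.
def demo_interpolation():
    import numpy as np
    from scipy.interpolate import griddata
    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    vals = np.array([0.0, 1.0, 1.0, 2.0])  # f(x, y) = x + y at the corners
    return griddata(pts, vals, np.array([[0.5, 0.5]]), method='linear')  # ~[1.0]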
|
rsalmaso/django-allauth
|
allauth/socialaccount/providers/paypal/tests.py
|
Python
|
mit
| 613
| 0
|
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import PaypalProvider
class PaypalTests(OAuth2TestsMixin, TestCase):
provider_id = PaypalProvider.id
def get_mocked_response(self):
return MockedResponse(
200,
"""
{
"user_id":
"https://www.paypal.com/webapps/auth/server/64ghr894040044",
"name": "Jane Doe",
"given_name": "Jane",
"family_name": "Doe",
"email": "janedoe@example.com"
}
""",
)
|
vahana/prf
|
prf/tests/test_exc.py
|
Python
|
mit
| 2,450
| 0.015102
|
import mock
import json
import pytest
import prf.exc
def fake_resp(code):
return mock.MagicMock(
code = str(code),
status_code = code,
headers = [],
title = 'title',
detail = 'detail',
        explanation = 'explanation'
)
class TestExc(object):
@mock.patch('prf.exc.log_exception')
def test_create_response(self, fake_log_exception):
out = prf.exc.create_response(fake_resp(200), {})
assert fake_log_exception.call_count == 0
# Temporarily pop timestamp, we need to freeze time to test it
d = json.loads(out.text)
d.pop('timestamp')
assert {
'explanation': 'explanation',
'code': '200',
'detail': 'detail',
'title': 'title'
} == d
assert out.content_type == 'application/json'
@mock.patch('prf.exc.add_stack')
@mock.patch('prf.exc.logger')
def test_log_exception(self, fake_logger, fake_add_stack):
request = mock.MagicMock(
url = 'url',
remote_user = 'remote_user',
client_addr = 'client_addr',
remote_addr = 'remote_addr'
)
out = prf.exc.log_exception(fake_resp(400),
params = dict(
headers = ['Header'],
request = request,
extra = {'a': 123},
detail = 'param detail'
)
)
assert fake_add_stack.called
assert fake_logger.error.called
@mock.patch('prf.exc.log_exception')
def test_create_response_w_log(self, fake_log_exception):
in_resp = mock.MagicMock()
in_resp.code = '400'
in_resp.status_code = 400
out = prf.exc.create_response(in_resp, {})
assert 'error_id' in json.loads(out.text)
assert fake_log_exception.call_count == 1
def test_exception_response(self):
out = prf.exc.exception_response(200)
assert out.code == 200
assert out.content_type == 'application/json'
assert 'error_id' not in out.json
out = prf.exc.exception_response(400, extra={'a':123})
assert 'error_id' in out.json
def test_statuses(self):
res = {'id':1}
out = prf.exc.HTTPCreated(
location = 'http://location',
resource = res
)
assert out.json['resource'] == {'self': 'http://location', 'id': 1}
|
AdaHeads/Coverage_Tests
|
disabled_tests/incoming_calls.py
|
Python
|
gpl-3.0
| 9,794
| 0.027159
|
# -*- coding: utf-8 -*-
import logging
from pprint import pformat
from time import clock, sleep
try:
import unittest2 as unittest
except ImportError:
    import unittest
import config
from event_stack import TimeOutReached
from database_reception import Database_Reception
from static_agent_pools import Receptionists, Customers
logging.basicConfig (level = logging.INFO)
class Test_Case (unittest.TestCase):
    Caller = None
Receptionist = None
Receptionist_2 = None
Callee = None
Reception_Database = None
Reception = None
Start_Time = None
Next_Step = 1
def Preconditions (self, Reception):
self.Start_Time = clock ()
self.Next_Step = 1
self.Log ("Incoming calls test case: Setting up preconditions...")
self.Log ("Requesting a customer (caller)...")
self.Caller = Customers.request ()
self.Log ("Requesting a receptionist...")
self.Receptionist = Receptionists.request ()
self.Log ("Requesting a second receptionist...")
self.Receptionist_2 = Receptionists.request ()
self.Log ("Requesting a customer (callee)...")
self.Callee = Customers.request ()
self.Log ("Select which reception to test...")
self.Reception = Reception
self.Log ("Select a reception database connection...")
self.Reception_Database = Database_Reception (uri = config.reception_server_uri,
authtoken = self.Receptionist.call_control.authtoken)
def Postprocessing (self):
self.Log ("Incoming calls test case: Cleaning up after test...")
if not self.Caller is None:
self.Caller.release ()
if not self.Receptionist is None:
self.Receptionist.release ()
if not self.Receptionist_2 is None:
self.Receptionist_2.release ()
if not self.Callee is None:
self.Callee.release ()
def Step (self,
Message,
Delay_In_Seconds = 0.0):
if self.Next_Step is None:
self.Next_Step = 1
if self.Start_Time is None:
self.Start_Time = clock ()
logging.info ("Step " + str (self.Next_Step) + ": " + Message)
sleep (Delay_In_Seconds)
self.Next_Step += 1
def Log (self,
Message,
Delay_In_Seconds = 0.0):
if self.Next_Step is None:
self.Next_Step = 1
if self.Start_Time is None:
self.Start_Time = clock ()
logging.info (" " + str (self.Next_Step - 1) + ": " + Message)
sleep (Delay_In_Seconds)
def Caller_Places_Call (self, Number):
self.Step (Message = "Caller places call to " + str (Number) + "...")
self.Log (Message = "Dialling through caller agent...")
self.Caller.dial (Number)
def Receptionist_Places_Call (self, Number):
self.Step (Message = "Receptionist places call to " + str (Number) + "...")
self.Log (Message = "Dialling through receptionist agent...")
self.Receptionist.dial (Number)
def Caller_Hears_Dialtone (self):
self.Step (Message = "Caller hears dial-tone...")
self.Log (Message = "Caller agent waits for dial-tone...")
self.Caller.sip_phone.Wait_For_Dialtone ()
def Receptionist_Hears_Dialtone (self):
self.Step (Message = "Receptionist hears dial-tone...")
self.Log (Message = "Receptionist agent waits for dial-tone...")
self.Receptionist.sip_phone.Wait_For_Dialtone ()
def Call_Announced (self):
self.Step (Message = "Receptionist's client waits for 'call_offer'...")
try:
self.Receptionist.event_stack.WaitFor ("call_offer")
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("Call offer didn't arrive from Call-Flow-Control.")
if not self.Receptionist.event_stack.stack_contains (event_type="call_offer",
destination=self.Reception):
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("The arrived call offer was not for the expected reception (destination).")
return self.Receptionist.event_stack.Get_Latest_Event (Event_Type="call_offer", Destination=self.Reception)['call']['id'],\
self.Receptionist.event_stack.Get_Latest_Event (Event_Type="call_offer", Destination=self.Reception)['call']['reception_id']
def Call_Announced_As_Locked (self, Call_ID):
self.Step (Message = "Call-Flow-Control sends out 'call_lock'...")
try:
self.Receptionist.event_stack.WaitFor (event_type = "call_lock",
call_id = Call_ID,
timeout = 20.0)
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("No 'call_lock' event arrived from Call-Flow-Control.")
if not self.Receptionist.event_stack.stack_contains (event_type = "call_lock",
destination = self.Reception,
call_id = Call_ID):
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("The arrived 'call_lock' event was not for the expected reception (destination).")
def Call_Announced_As_Unlocked (self, Call_ID):
self.Step (Message = "Call-Flow-Control sends out 'call_unlock'...")
try:
self.Receptionist.event_stack.WaitFor (event_type = "call_unlock",
call_id = Call_ID)
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("No 'call_unlock' event arrived from Call-Flow-Control.")
if not self.Receptionist.event_stack.stack_contains (event_type = "call_unlock",
destination = self.Reception,
call_id = Call_ID):
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("The arrived 'call_unlock' event was not for the expected reception (destination).")
def Request_Information (self, Reception_ID):
self.Step (Message = "Requesting (updated) information about reception " + str (Reception_ID))
Data_On_Reception = self.Reception_Database.Single (Reception_ID)
self.Step (Message = "Received information on reception " + str (Reception_ID))
return Data_On_Reception
def Offer_To_Pick_Up_Call (self, Call_Flow_Control, Call_ID):
self.Step (Message = "Client offers to answer call...")
try:
Call_Flow_Control.PickupCall (call_id = Call_ID)
except:
self.Log (Message = "Pick-up call returned an error of some kind.")
def Call_Allocation_Acknowledgement (self, Call_ID, Receptionist_ID):
self.Step (Message = "Receptionist's client waits for 'call_pickup'...")
try:
self.Receptionist.event_stack.WaitFor (event_type = "call_pickup",
call_id = Call_ID)
except TimeOutReached:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("No 'call_pickup' event arrived from Call-Flow-Control.")
try:
Event = self.Receptionist.event_stack.Get_Latest_Event (Event_Type = "call_pickup",
Call_ID = Call_ID)
except:
logging.critical (self.Receptionist.event_stack.dump_stack ())
self.fail ("Could not extract the received 'call_pickup' event from the Call-Flow-Control client.")
try:
|
ricrdo/puuch
|
puuch/wsgi.py
|
Python
|
mit
| 1,416
| 0.000706
|
"""
WSGI config for puuch project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "puuch.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "puuch.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
alirizakeles/zato
|
code/zato-server/src/zato/server/service/internal/security/vault/policy.py
|
Python
|
gpl-3.0
| 154
| 0
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016, Zato Source s.r.o. https://zato.io
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
|
Kazade/NeHe-Website
|
google_appengine/google/appengine/api/memcache/__init__.py
|
Python
|
bsd-3-clause
| 52,152
| 0.004429
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Memcache API.
Provides memcached-alike API to application developers to store
data in memory when reliable storage via the DataStore API isn't
required and higher performance is desired.
"""
import cPickle
import cStringIO
import hashlib
import math
import types
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import capabilities
from google.appengine.api import namespace_manager
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.runtime import apiproxy_errors
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest
MemcacheGetResponse = memcache_service_pb.MemcacheGetResponse
MemcacheGetRequest = memcache_service_pb.MemcacheGetRequest
MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
MemcacheDeleteRequest = memcache_service_pb.MemcacheDeleteRequest
MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest
MemcacheBatchIncrementResponse = memcache_service_pb.MemcacheBatchIncrementResponse
MemcacheBatchIncrementRequest = memcache_service_pb.MemcacheBatchIncrementRequest
MemcacheFlushResponse = memcache_service_pb.MemcacheFlushResponse
MemcacheFlushRequest = memcache_service_pb.MemcacheFlushRequest
MemcacheStatsRequest = memcache_service_pb.MemcacheStatsRequest
MemcacheStatsResponse = memcache_service_pb.MemcacheStatsResponse
DELETE_NETWORK_FAILURE = 0
DELETE_ITEM_MISSING = 1
DELETE_SUCCESSFUL = 2
STORED = MemcacheSetResponse.STORED
NOT_STORED = MemcacheSetResponse.NOT_STORED
ERROR = MemcacheSetResponse.ERROR
EXISTS = MemcacheSetResponse.EXISTS
MAX_KEY_SIZE = 250
MAX_VALUE_SIZE = 10 ** 6
STAT_HITS = 'hits'
STAT_MISSES = 'misses'
STAT_BYTE_HITS = 'byte_hits'
STAT_ITEMS = 'items'
STAT_BYTES = 'bytes'
STAT_OLDEST_ITEM_AGES = 'oldest_item_age'
FLAG_TYPE_MASK = 7
FLAG_COMPRESSED = 1 << 3
TYPE_STR = 0
TYPE_UNICODE = 1
TYPE_PICKLED = 2
TYPE_INT = 3
TYPE_LONG = 4
TYPE_BOOL = 5
CAPABILITY = capabilities.CapabilitySet('memcache')
def _is_pair(obj):
"""Helper to test if something is a pair (2-tuple)."""
return isinstance(obj, tuple) and len(obj) == 2
def _add_name_space(message, namespace=None):
"""Populate the name_space field in a messagecol buffer.
Args:
message: A messagecol buffer supporting the set_name_space() operation.
namespace: The name of the namespace part. If None, use the
default namespace. The empty namespace (i.e. '') will clear
the name_space field.
"""
if namespace is None:
namespace = namespace_manager.get_namespace()
if not namespace:
message.clear_name_space()
else:
message.set_name_space(namespace)
def _key_string(key, key_prefix='', server_to_user_dict=None):
"""Utility function to handle different ways of requesting keys.
Args:
key: Either a string or tuple of (shard_number, string). In Google App
Engine the sharding is automatic so the shard number is ignored.
To memcache, the key is just bytes (no defined encoding).
key_prefix: Optional string prefix to prepend to key.
server_to_user_dict: Optional dictionary to populate with a mapping of
server-side key (which includes the key_prefix) to user-supplied key
(which does not have the prefix).
Returns:
The key as a non-unicode string prepended with key_prefix. This is
the key sent to and stored by the server. If the resulting key is
longer then MAX_KEY_SIZE, it will be hashed with sha1 and will be
replaced with the hex representation of the said hash.
Raises:
TypeError: If provided key isn't a string or tuple of (int, string)
or key_prefix.
"""
if _is_pair(key):
key = key[1]
if not isinstance(key, basestring):
raise TypeError('Key must be a string instance, received %r' % key)
if not isinstance(key_prefix, basestring):
raise TypeError('key_prefix must be a string instance, received %r' %
key_prefix)
server_key = key_prefix + key
if isinstance(server_key, unicode):
server_key = server_key.encode('utf-8')
if len(server_key) > MAX_KEY_SIZE:
server_key = hashlib.sha1(server_key).hexdigest()
if server_to_user_dict is not None:
assert isinstance(server_to_user_dict, dict)
server_to_user_dict[server_key] = key
return server_key
def _validate_encode_value(value, do_pickle):
"""Utility function to validate and encode server keys and values.
Args:
value: Value to store in memcache. If it's a string, it will get passed
along as-is. If it's a unicode string, it will be marked appropriately,
such that retrievals will yield a unicode value. If it's any other data
type, this function will attempt to pickle the data and then store the
serialized result, unpickling it upon retrieval.
do_pickle: Callable that takes an object and returns a non-unicode
string containing the pickled object.
Returns:
Tuple (stored_value, flags) where:
stored_value: The value as a non-unicode string that should be stored
in memcache.
flags: An integer with bits set from the FLAG_* constants in this file
to indicate the encoding of the key and value.
Raises:
ValueError: If the encoded value is too large.
pickle.PicklingError: If the value is not a string and could not be pickled.
RuntimeError: If a complicated data structure could not be pickled due to
too many levels of recursion in its composition.
"""
flags = 0
stored_value = value
if isinstance(value, str):
pass
elif isinstance(value, unicode):
stored_value = value.encode('utf-8')
    flags |= TYPE_UNICODE
elif isinstance(value, bool):
stored_value = str(int(value))
flags |= TYPE_BOOL
elif isinstance(value, int):
stored_value = str(value)
flags |= TYPE_INT
elif isinstance(value, long):
stored_value = str(value)
flags |= TYPE_LONG
else:
stored_value = do_pickle(value)
flags |= TYPE_PICKLED
if len(stored_value) > MAX_VALUE_SIZE:
raise ValueError('Values may not be more than %d bytes in length; '
'received %d bytes' % (MAX_VALUE_SIZE, len(stored_value)))
return (stored_value, flags)
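# A quick illustrative check of the encoding rules above (standalone
# sketch, not part of the original module): unicode values are stored
# UTF-8 encoded with TYPE_UNICODE set, ints as decimal strings.
def _demo_encode_rules():
  assert _validate_encode_value(u'caf\xe9', cPickle.dumps) == ('caf\xc3\xa9', TYPE_UNICODE)
  assert _validate_encode_value(42, cPickle.dumps) == ('42', TYPE_INT)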
def _decode_value(stored_value, flags, do_unpickle):
"""Utility function for decoding values retrieved from memcache.
Args:
stored_value: The value as a non-unicode string that was stored.
flags: An integer with bits set from the FLAG_* constants in this file
that indicate the encoding of the key and value.
do_unpickle: Callable that takes a non-unicode string object that contains
a pickled object and returns the pickled object.
Returns:
The original object that was stored, be it a normal string, a unicode
string, int, long, or a Python object that was pickled.
Raises:
pickle.UnpicklingError: If the value could not be unpickled.
"""
assert isinstance(stored_value, str)
assert isinstance(flags, (int, long))
type_number = flags & FLAG_TYPE_MASK
value = stored_value
if type_number == TYPE_STR:
return value
elif type_number == TYPE_UNICODE:
return unicode(value, 'utf-8')
elif type_number == TYPE_PICKLED:
return do_unpickle(value)
elif type_number == TYPE_BOOL:
return bool(int(value))
elif type_number == TYPE_INT:
return int(value)
elif type_number == TYPE_LONG:
return long(value)
else:
|
jacquev6/Pynamixel
|
Pynamixel/instructions/bus_tests.py
|
Python
|
mit
| 3,416
| 0.001464
|
# coding: utf8
# Copyright 2015 Vincent Jacques <vincent@vincent-jacques.net>
import unittest
import MockMockMock
from ..bus import Bus
from .ping import Ping
from .read_data import ReadData
from .write_data import WriteData
from .reg_write import RegWrite
from .action import Action
from .reset import Reset
from .sync_write import SyncWrite
class InstructionsOnBusTestCase(unittest.TestCase):
def setUp(self):
super(InstructionsOnBusTestCase, self).setUp()
self.mocks = MockMockMock.Engine()
self.hardware = self.mocks.create("hardware")
self.bus = Bus(self.hardware.object)
def tearDown(self):
self.mocks.tearDown()
super(InstructionsOnBusTestCase, self).tearDown()
def test_ping(self):
self.hardware.expect.send([0xFF, 0xFF, 0x07, 0x02, 0x01, 0xF5])
self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
self.hardware.expect.receive(2).and_return([0x00, 0xF6])
self.bus.send(0x07, Ping())
def test_read_data(self):
# http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm (example 1)
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2B, 0x01, 0xCC])
self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x01, 0x03])
self.hardware.expect.receive(3).and_return([0x00, 0x20, 0xDB])
ident, error, response = self.bus.send(0x01, ReadData(0x2B))
self.assertEqual(response.data, [0x20])
def test_write_data(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x06, 0x03, 0x2B, 0x10, 0x11, 0x12, 0x97])
self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
self.hardware.expect.receive(2).and_return([0x00, 0xF6])
self.bus.send(0x01, WriteData(0x2B, [0x10, 0x11, 0x12]))
def test_reg_write(self):
self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x06, 0x04, 0x2B, 0x10, 0x11, 0x12, 0x96])
self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
self.hardware.expect.receive(2).and_return([0x00, 0xF6])
self.bus.send(0x01, RegWrite(0x2B, [0x10, 0x11, 0x12]))
def test_action(self):
self.hardware.expect.send([0xFF, 0xFF, 0x07, 0x02, 0x05, 0xF1])
self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
self.hardware.expect.receive(2).and_return([0x00, 0xF6])
self.bus.send(0x07, Action())
def test_reset(self):
self.hardware.expect.send([0xFF, 0xFF, 0x07, 0x02, 0x06, 0xF0])
self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x07, 0x02])
self.hardware.expect.receive(2).and_return([0x00, 0xF6])
self.bus.send(0x07, Reset())
def test_sync_write(self):
# http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm (example 5)
self.hardware.expect.send([
0xFF, 0xFF, 0xFE, 0x18, 0x83, 0x1E, 0x04,
0x00, 0x10, 0x00, 0x50, 0x01,
0x01, 0x20, 0x02, 0x60, 0x03,
0x02, 0x30, 0x00, 0x70, 0x01,
0x03, 0x20, 0x02, 0x80, 0x03,
0x12,
])
self.bus.broadcast(SyncWrite(
0x1E,
{
0: [0x10, 0x00, 0x50, 0x01],
1: [0x20, 0x02, 0x60, 0x03],
2: [0x30, 0x00, 0x70, 0x01],
3: [0x20, 0x02, 0x80, 0x03],
}
))
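# A minimal sketch of the checksum rule the expected packets above obey
# (Dynamixel protocol 1.0; standalone helper, not part of the library):
# the final byte is the inverted 8-bit sum of ID, length, instruction
# and parameters.
def _checksum(body):
    return ~sum(body) & 0xFF

assert _checksum([0x07, 0x02, 0x01]) == 0xF5  # matches the Ping packet in test_ping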
|
subeax/grab
|
test/case/selector_kit.py
|
Python
|
mit
| 3,668
| 0.004635
|
# coding: utf-8
from unittest import TestCase
#import os
#import sys
#root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
#sys.path.insert(0, root)
from test.util import GRAB_TRANSPORT, ignore_transport, only_transport
from test.server import SERVER
from grab.selector import KitSelector
from grab import Grab
from grab.util.py3k_support import *
HTML = """
<html>
<body>
<h1>test</h1>
<ul>
<li>one</li>
<li>two</li>
<li>three</li>
<li class="zzz" id="6">z 4 foo</li>
</ul>
<ul id="second-list">
<li class="li-1">yet one</li>
<li class="li-2">yet two</li>
</ul>
</body>
</html>
"""
class KitSelectorTestCase(TestCase):
def setUp(self):
g = Grab(transport='grab.transport.kit.KitTransport')
SERVER.RESPONSE['get'] = HTML
g.go(SERVER.BASE_URL)
self.qt_doc = g.transport.kit.page.mainFrame().documentElement()
def test_in_general(self):
sel = KitSelector(self.qt_doc)
def test_select_node(self):
sel = KitSelector(self.qt_doc).select('h1')[0]
self.assertEquals('test', sel.node.toInnerXml())
def test_html(self):
sel = KitSelector(self.qt_doc).select('h1')[0]
self.assertEquals('<h1>test</h1>', sel.html())
def test_textselector(self):
self.assertEquals('one', KitSelector(self.qt_doc).select('li').text())
def test_number(self):
self.assertEquals(4, KitSelector(self.qt_doc).select('li.zzz').number())
# TODO
# test the ID selector (#6)
#def test_text_selector(self):
#sel = KitSelector(self.qt_doc).select('//li/text()').one()
#self.assertTrue(isinstance(sel, TextSelector))
## TODO: add --pyquery flag to runtest script
##def test_select_pyquery(self):
##root = Selector(self.qt_doc)
##self.assertEquals('test', root.select(pyquery='h1')[0].node.text)
        ##self.assertEquals('z 4 foo', root.select(pyquery='body')[0].select(pyquery='#6')[0].node.text)
def test_select_select(self):
root = KitSelector(self.qt_doc)
self.assertEquals(set(['one', 'yet one']),
set([x.text() for x in root.select('ul').select('li:first-child')]),
)
def test_text_list(self):
root = KitSelector(self.qt_doc)
        self.assertEquals(set(['one', 'yet one']),
set(root.select('ul > li:first-child').text_list()),
)
def test_attr_list(self):
root = KitSelector(self.qt_doc)
self.assertEquals(set(['li-1', 'li-2']),
set(root.select('ul[id=second-list] > li')\
.attr_list('class'))
)
class TestSelectorList(TestCase):
def setUp(self):
g = Grab(transport='grab.transport.kit.KitTransport')
SERVER.RESPONSE['get'] = HTML
g.go(SERVER.BASE_URL)
self.qt_doc = g.transport.kit.page.mainFrame().documentElement()
def test_one(self):
sel = KitSelector(self.qt_doc).select('ul > li')
self.assertEquals('one', unicode(sel.one().node.toPlainText()))
self.assertEquals('one', sel.text())
def test_number(self):
sel = KitSelector(self.qt_doc).select('li:nth-child(4)')
self.assertEquals(4, sel.number())
def test_exists(self):
sel = KitSelector(self.qt_doc).select('li:nth-child(4)')
self.assertEquals(True, sel.exists())
sel = KitSelector(self.qt_doc).select('li:nth-child(5)')
self.assertEquals(False, sel.exists())
|
OpenMined/PySyft
|
packages/syft/tests/syft/lib/python/string/string_serde_test.py
|
Python
|
apache-2.0
| 780
| 0
|
# syft absolute
import syft as sy
from syft.lib.python.string import String
from syft.proto.lib.python.string_pb2 import String as String_PB
def test_string_serde() -> None:
syft_string = String("Hello OpenMi
|
ned")
serialized = syft_string._object2proto()
    assert isinstance(serialized, String_PB)
deserialized = String._proto2object(proto=serialized)
assert isinstance(deserialized, String)
assert deserialized.id == syft_string.id
def test_string_send(client: sy.VirtualMachineClient) -> None:
syft_string = String("Hello OpenMined!")
ptr = syft_string.send(client)
# Check pointer type
assert ptr.__class__.__name__ == "StringPointer"
# Check that we can get back the object
res = ptr.get()
assert res == syft_string
|
ibc/MediaSoup
|
worker/deps/gyp/test/mac/gyptest-app-assets-catalog.py
|
Python
|
isc
| 4,164
| 0.010567
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
from __future__ import print_function
import TestGyp
import TestMac
import os
import plistlib
import subprocess
import sys
if sys.platform == 'darwin':
print("This test is currently disabled: https://crbug.com/483696.")
sys.exit(0)
def ExpectEq(expected, actual):
if expected != actual:
print('Expected "%s", got "%s"' % (expected, actual), file=sys.stderr)
test.fail_test()
def ls(path):
'''Returns a list of all files in a directory, relative to the directory.'''
result = []
for dirpath, _, files in os.walk(path):
for f in files:
      result.append(os.path.join(dirpath, f)[len(path) + 1:])
return result
# Xcode supports for assets catalog was introduced in Xcode 6.0
if sys.platform == 'darwin' and TestMac.Xcode.Version() >= '0600':
  test_gyp_path = 'test-assets-catalog.gyp'
test_app_path = 'Test App Assets Catalog Gyp.app'
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp(test_gyp_path, chdir='app-bundle')
test.build(test_gyp_path, test.ALL, chdir='app-bundle')
# Binary
test.built_file_must_exist(
os.path.join(test_app_path, 'Contents/MacOS/Test App Assets Catalog Gyp'),
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path(
os.path.join(test_app_path, 'Contents/Info.plist'),
chdir='app-bundle')
test.must_exist(info_plist)
test.must_contain(
info_plist,
'com.google.Test-App-Assets-Catalog-Gyp') # Variable expansion
test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}');
if test.format != 'make':
# TODO: Synthesized plist entries aren't hooked up in the make generator.
machine = subprocess.check_output(['sw_vers', '-buildVersion']).rstrip('\n')
plist = plistlib.readPlist(info_plist)
ExpectEq(machine, plist['BuildMachineOSBuild'])
expected = ''
version = TestMac.Xcode.SDKVersion()
expected = 'macosx' + version
ExpectEq(expected, plist['DTSDKName'])
sdkbuild = TestMac.Xcode.SDKBuild()
if not sdkbuild:
# Above command doesn't work in Xcode 4.2.
sdkbuild = plist['BuildMachineOSBuild']
ExpectEq(sdkbuild, plist['DTSDKBuild'])
ExpectEq(TestMac.Xcode.Version(), plist['DTXcode'])
ExpectEq(TestMac.Xcode.Build(), plist['DTXcodeBuild'])
# Resources
strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
for f in strings_files:
strings = test.built_file_path(
os.path.join(test_app_path, 'Contents/Resources/English.lproj', f),
chdir='app-bundle')
test.must_exist(strings)
    # Xcode writes UTF-16LE with BOM.
contents = open(strings, 'rb').read()
if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
test.fail_test()
test.built_file_must_exist(
os.path.join(
test_app_path, 'Contents/Resources/English.lproj/MainMenu.nib'),
chdir='app-bundle')
  # make does not support .xcassets files
extra_content_files = []
if test.format != 'make':
extra_content_files = ['Contents/Resources/Assets.car']
for f in extra_content_files:
test.built_file_must_exist(
os.path.join(test_app_path, f),
chdir='app-bundle')
# Packaging
test.built_file_must_exist(
os.path.join(test_app_path, 'Contents/PkgInfo'),
chdir='app-bundle')
test.built_file_must_match(
os.path.join(test_app_path, 'Contents/PkgInfo'), 'APPLause',
chdir='app-bundle')
# Check that no other files get added to the bundle.
if set(ls(test.built_file_path(test_app_path, chdir='app-bundle'))) != \
set(['Contents/MacOS/Test App Assets Catalog Gyp',
'Contents/Info.plist',
'Contents/Resources/English.lproj/MainMenu.nib',
'Contents/PkgInfo',
] + extra_content_files +
[os.path.join('Contents/Resources/English.lproj', f)
for f in strings_files]):
test.fail_test()
test.pass_test()
|
haphaeu/yoshimi
|
mth/MultiThread_ClientPool.py
|
Python
|
lgpl-3.0
| 1,065
| 0.019718
|
import threading
import Queue
from random import random
theVar = 0
class MyThread(threading.Thread):
    def run(self):
        global theVar
        while True:
            client = myPool.get()
            print '- starting thread -'
            if client == 'stop':
                print 'Thread finished <-'
                return 0
            elif client is not None:
                for x in xrange(int(random() * 1000000)): continue
                theVar = theVar + 1
                print '- Thread ran -'
# ##
print '#####################################################'
# number of threads
NUMTHREADS = 3
NUMPROCESSES = 20
# create a pool manager
myPool = Queue.Queue(0)
# start the worker threads
for x in xrange(NUMTHREADS):
    print '-> Starting thread', x
    MyThread().start()
# pass data into the thread pool
# and run the threads a couple of times
for x in xrange(NUMPROCESSES):
    print '- passing data to thread -'
    myPool.put('dummy')
# stop the threads
for x in xrange(NUMTHREADS):
print '- Stopping thread -'
myPool.put('stop')
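# Note: this script is Python 2 (Queue, xrange, print statements). A minimal
# Python 3 sketch of the same sentinel-based pool, for reference only
# (assumption: illustrative, not a drop-in port):
#   import queue, threading
#   pool = queue.Queue()
#   def worker():
#       while True:
#           if pool.get() == 'stop':
#               return
#           # ... do the work ...
#   for _ in range(3):
#       threading.Thread(target=worker).start()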
|
talon-one/talon_one.py
|
test/test_attributes_mandatory.py
|
Python
|
mit
| 2,093
| 0.0043
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.attributes_mandatory import AttributesMandatory # noqa: E501
from talon_one.rest import ApiException
class TestAttributesMandatory(unittest.TestCase):
"""AttributesMandator
|
y unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test AttributesMandatory
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
|
# model = talon_one.models.attributes_mandatory.AttributesMandatory() # noqa: E501
        if include_optional:
return AttributesMandatory(
campaigns = [
'0'
],
coupons = [
'0'
]
)
        else:
            return AttributesMandatory()
def testAttributesMandatory(self):
"""Test AttributesMandatory"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
jawilson/home-assistant
|
tests/components/recorder/test_statistics.py
|
Python
|
apache-2.0
| 24,602
| 0.001179
|
"""The tests for sensor recorder platform."""
# pylint: disable=protected-access,invalid-name
from datetime import timedelta
from unittest.mock import patch, sentinel
import pytest
from pytest import approx
from homeassistant.components.recorder import history
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import (
StatisticsShortTerm,
process_timestamp_to_utc_isoformat,
)
from homeassistant.components.recorder.statistics import (
async_add_external_statistics,
get_last_short_term_statistics,
get_last_statistics,
get_metadata,
list_statistic_ids,
statistics_during_period,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import setup_component
import homeassistant.util.dt as dt_util
from tests.common import mock_registry
from tests.components.recorder.common import wait_recording_done
def test_compile_hourly_statistics(hass_recorder):
"""Test compiling hourly statistics."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
zero, four, states = record_states(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}):
stats = statistics_during_period(hass, zero, period="5minute", **kwargs)
assert stats == {}
stats = get_last_short_term_statistics(hass, 0, "sensor.test1", True)
assert stats == {}
recorder.do_adhoc_statistics(start=zero)
recorder.do_adhoc_statistics(start=four)
wait_recording_done(hass)
expected_1 = {
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero),
"end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
"mean": approx(14.915254237288135),
"min": approx(10.0),
"max": approx(20.0),
"last_reset": None,
"state": None,
"sum": None,
}
expected_2 = {
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(four),
"end": process_timestamp_to_utc_isoformat(four + timedelta(minutes=5)),
"mean": approx(20.0),
"min": approx(20.0),
"max": approx(20.0),
"last_reset": None,
"state": None,
"sum": None,
}
expected_stats1 = [
{**expected_1, "statistic_id": "sensor.test1"},
{**expected_2, "statistic_id": "sensor.test1"},
]
expected_stats2 = [
{**expected_1, "statistic_id": "sensor.test2"},
{**expected_2, "statistic_id": "sensor.test2"},
]
# Test statistics_during_period
stats = statistics_during_period(hass, zero, period="5minute")
assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2}
stats = statistics_during_period(
hass, zero, statistic_ids=["sensor.test2"], period="5minute"
)
assert stats == {"sensor.test2": expected_stats2}
stats = statistics_during_period(
hass, zero, statistic_ids=["sensor.test3"], period="5minute"
)
assert stats == {}
# Test get_last_short_term_statistics
stats = get_last_short_term_statistics(hass, 0, "sensor.test1", True)
assert stats == {}
stats = get_last_short_term_statistics(hass, 1, "sensor.test1", True)
assert stats == {"sensor.test1": [{**expected_2, "statistic_id": "sensor.test1"}]}
stats = get_last_short_term_statistics(hass, 2, "sensor.test1", True)
assert stats == {"sensor.test1": expected_stats1[::-1]}
stats = get_last_short_term_statistics(hass, 3, "sensor.test1", True)
assert stats == {"sensor.test1": expected_stats1[::-1]}
stats = get_last_short_term_statistics(hass, 1, "sensor.test3", True)
assert stats == {}
@pytest.fixture
def mock_sensor_statistics():
"""Generate some fake statistics."""
def sensor_stats(entity_id, start):
"""Generate fake statistics."""
return {
"meta": {
"statistic_id": entity_id,
"unit_of_measurement": "dogs",
"has_mean": True,
"has_sum": False,
},
"stat": {"start": start},
}
def get_fake_stats(_hass, start, _end):
return [
sensor_stats("sensor.test1", start),
sensor_stats("sensor.test2", start),
sensor_stats("sensor.test3", start),
]
with patch(
"homeassistant.components.sensor.recorder.compile_statistics",
side_effect=get_fake_stats,
):
yield
@pytest.fixture
def mock_from_stats():
"""Mock out Statistics.from_stats."""
counter = 0
real_from_stats = StatisticsShortTerm.from_stats
def from_stats(metadata_id, stats):
nonlocal counter
if counter == 0 and metadata_id == 2:
counter += 1
return None
return real_from_stats(metadata_id, stats)
with patch(
"homeassistant.components.recorder.statistics.StatisticsShortTerm.from_stats",
side_effect=from_stats,
autospec=True,
):
yield
def test_compile_periodic_statistics_exception(
hass_recorder, mock_sensor_statistics, mock_from_stats
):
"""Test exception handling when compiling periodic statistics."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
now = dt_util.utcnow()
recorder.do_adhoc_statistics(start=now)
recorder.do_adhoc_statistics(start=now + timedelta(minutes=5))
wait_recording_done(hass)
expected_1 = {
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(now),
"end": process_timestamp_to_utc_isoformat(now + timedelta(minutes=5)),
"mean": None,
"min": None,
"max": None,
"last_reset": None,
"state": None,
"sum": None,
}
expected_2 = {
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(now + timedelta(minutes=5)),
"end": process_timestamp_to_utc_isoformat(now + timedelta(minutes=10)),
"mean": None,
"min": None,
"max": None,
"last_reset": None,
"state": None,
"sum": None,
}
expected_stats1 = [
{**expected_1, "statistic_id": "sensor.test1"},
{**expected_2, "statistic_id": "sensor.test1"},
]
expected_stats2 = [
{**expected_2, "statistic_id": "sensor.test2"},
]
expected_stats3 = [
{**expected_1, "statistic_id": "sensor.test3"},
{**expected_2, "statistic_id": "sensor.test3"},
]
stats = statistics_during_period(hass, now, period="5minute")
assert stats == {
"sensor.test1": expected_stats1,
"sensor.test2": expected_stats2,
"sensor.test3": expected_stats3,
}
def test_rename_entity(hass_recorder):
"""Test statistics is migrated when entity_id is changed."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
entity_reg = mock_registry(hass)
reg_entry = entity_reg.async_get_or_create(
"sensor",
"test",
"unique_0000",
suggested_object_id="test1",
)
assert reg_entry.entity_id == "sensor.test1"
zero, four, states = record_states(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}):
stats = statistics_during_period(hass, zero, period="5minute", **kwargs)
assert stats == {}
stats = get_last_short_term_statistics(hass, 0, "sensor.test1", True)
assert stats == {}
recorder.do_adhoc_statistics(start=zero)
wait_recording_done(hass)
expected_1 = {
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(zero),
"end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
"mean": approx(14.915254237288135),
"min": ap
|
perryl/morph
|
morphlib/git.py
|
Python
|
gpl-2.0
| 10,923
| 0.000275
|
# Copyright (C) 2011-2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import cliapp
import ConfigParser
import logging
import os
import re
import string
import StringIO
import sys
import morphlib
class NoModulesFileError(cliapp.AppException):
def __init__(self, repo, ref):
Exception.__init__(self,
'%s:%s has no .gitmodules file.' % (repo, ref))
class Submodule(object):
def __init__(self, name, url, sha1, path):
self.name = name
self.url = url
self.commit = sha1
self.path = path
def __str__(self):
return "{name}|{url}|{path}".format(name=self.name,
url=self.url,
path=self.path)
class InvalidSectionError(cliapp.AppException):
def __init__(self, repo, ref, section):
Exception.__init__(self,
'%s:%s:.gitmodules: Found a misformatted section '
'title: [%s]' % (repo, ref, section))
class Submodules(object):
def __init__(self, repo, ref, runcmd_cb=cliapp.runcmd):
self.repo = repo
self.ref = ref
self.submodules = []
self.runcmd_cb = runcmd_cb
def load(self):
content = self._read_gitmodules_file()
io = StringIO.StringIO(content)
parser = ConfigParser.RawConfigParser()
parser.readfp(io)
self._validate_and_read_entries(parser)
def _read_gitmodules_file(self):
try:
# try to read the .gitmodules file from the repo/ref
content = gitcmd(self.runcmd_cb, 'cat-file', 'blob',
'%s:.gitmodules' % self.ref, cwd=self.repo,
ignore_fail=True)
# drop indentation in sections, as RawConfigParser cannot handle it
return '\n'.join([line.strip() for line in content.splitlines()])
except cliapp.AppException:
raise NoModulesFileError(self.repo, self.ref)
def _validate_and_read_entries(self, parser):
gd = morphlib.gitdir.GitDirectory(self.repo)
for section in parser.sections():
# validate section name against the 'section "foo"' pattern
section_pattern = r'submodule "(.*)"'
if re.match(section_pattern, section):
# parse the submodule name, URL and path
name = re.sub(section_pattern, r'\1', section)
url = parser.get(section, 'url')
path = parser.get(section, 'path')
try:
sha1 = gd.get_submodule_commit(self.ref, path)
except morphlib.gitdir.MissingSubmoduleCommitError:
# Ignore submodules listed in .gitmodules file that are
# not pointing to a git commit object. If you try to clone
# a repo with submodules without a commit object (using
# Git), nothing will happen, and Git will not even complain
continue
# create a submodule object
submodule = Submodule(name, url, sha1, path)
self.submodules.append(submodule)
else:
raise InvalidSectionError(self.repo, self.ref, section)
def __iter__(self):
for submodule in self.submodules:
yield submodule
def __len__(self):
return len(self.submodules)
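# For reference, a minimal .gitmodules file of the shape this parser expects
# (section titles must match the 'submodule "name"' pattern; indentation is
# stripped before parsing because RawConfigParser cannot handle it):
#   [submodule "foo"]
#       path = foo
#       url = git://example.com/foo.git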
def update_submodules(app, repo_dir): # pragma: no cover
    '''Set up repo submodules, rewriting the URLs to expand prefixes.
We do this automatically rather than leaving it to the user so that they
don't have to worry about the prefixed URLs manually.
'''
if os.path.exists(os.path.join(repo_dir, '.gitmodules')):
resolver = morphlib.repoaliasresolver.RepoAliasResolver(
app.settings['repo-alias'])
gitcmd(app.runcmd, 'submodule', 'init', cwd=repo_dir)
submodules = Submodules(repo_dir, 'HEAD')
submodules.load()
for submodule in submodules:
gitcmd(app.runcmd, 'config', 'submodule.%s.url' % submodule.name,
resolver.pull_url(submodule.url), cwd=repo_dir)
gitcmd(app.runcmd, 'submodule', 'update', cwd=repo_dir)
class ConfigNotSetException(cliapp.AppException):
def __init__(self, missing, defaults):
self.missing = missing
self.defaults = defaults
if len(missing) == 1:
self.preamble = ('Git configuration for %s has not been set. '
'Please set it with:' % missing[0])
else:
self.preamble = ('Git configuration for keys %s and %s '
'have not been set. Please set them with:'
% (', '.join(missing[:-1]), missing[-1]))
def __str__(self):
lines = [self.preamble]
lines.extend('git config --global %s \'%s\'' % (k, self.defaults[k])
for k in self.missing)
return '\n '.join(lines)
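# Example of the message built above: for missing == ['user.name'] and
# defaults == {'user.name': 'My Name'}, str(exception) yields:
#   Git configuration for user.name has not been set. Please set it with:
#     git config --global user.name 'My Name'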
class IdentityNotSetException(ConfigNotSetException):
preamble = 'Git user info incomplete. Please set your identity, using:'
def __init__(self, missing):
self.defaults = {"user.name": "My Name",
"user.email": "me@example.com"}
self.missing = missing
def get_user_name(runcmd):
'''Get user.name configuration setting. Complain if none was found.'''
if 'GIT_AUTHOR_NAME' in os.environ:
return os.environ['GIT_AUTHOR_NAME'].strip()
try:
config = check_config_set(runcmd, keys={"user.name": "My Name"})
return config['user.name']
except ConfigNotSetException as e:
raise IdentityNotSetException(e.missing)
def get_user_email(runcmd):
    '''Get user.email configuration setting. Complain if none was found.'''
if 'GIT_AUTHOR_EMAIL' in os.environ:
return os.environ['GIT_AUTHOR_EMAIL'].strip()
try:
cfg = check_config_set(runcmd, keys={"user.email": "me@example.com"})
return cfg['user.email']
except ConfigNotSetException as e:
raise IdentityNotSetException(e.missing)
def check_config_set(runcmd, keys, cwd='.'):
''' Check whether the given keys have values in git config. '''
missing = []
found = {}
for key in keys:
try:
            value = gitcmd(runcmd, 'config', key, cwd=cwd,
                           print_command=False).strip()
print_command=False).strip()
found[key] = value
except cliapp.AppException:
missing.append(key)
if missing:
raise ConfigNotSetException(missing, keys)
return found
def copy_repository(runcmd, repo, destdir, is_mirror=True):
'''Copies a cached repository into a directory using cp.
This also fixes up the repository afterwards, so that it can contain
code etc. It does not leave any given branch ready for use.
This is slightly faster than `git clone` for large repositories,
as of Git 2.3.0. Long term, we should fix `git clone` to be as fast
as possible, and use that.
'''
    if not is_mirror:
runcmd(['cp', '-a', os.path.join(repo, '.git'),
os.path.join(destdir, '.git')])
return
runcmd(['cp', '-a', repo, os.path.join(destdir, '.git')])
runcmd(['chown', '-R', '%s:%s' % (os.getuid(), os.getgid()), destdir])
# core.bare should be false so that git believes work trees are possible
gitcmd(runcmd, 'config', 'core.bare', 'false', cwd=destdir)
# we do not want the origin remote to behave as a mirror for pulls
gitcmd(runcmd, 'confi
|
yishayv/lyacorr
|
mpi_helper.py
|
Python
|
mit
| 2,068
| 0
|
from __future__ import print_function
import time
import numpy as np
from mpi4py import MPI
from python_compat import range
comm = MPI.COMM_WORLD
def r_print(*args):
"""
print message on the root node (rank 0)
:param args:
:return:
"""
if comm.rank == 0:
        print('ROOT:', end=' ')
for i in args:
print(i, end=' ')
# noinspection PyArgumentList
print()
def l_print(*args):
"""
print message on each node, synchronized
:param args:
:return:
"""
for rank in range(0, comm.size):
comm.Barrier()
if rank == comm.rank:
l_print_no_barrier(*args)
comm.Barrier()
def l_print_no_barrier(*args):
""
|
"
print message on each node
:param args:
:return:
"""
print(comm.rank, ':', end=' ')
for i in args:
print(i, end=' ')
# noinspection PyArgumentList
print()
def get_chunks(num_items, num_steps):
"""
divide items into n=num_steps chunks
:param num_items:
:param num_steps:
:return: chunk sizes, chunk offsets
"""
chunk_sizes = np.zeros(num_steps, dtype=int)
chunk_sizes[:] = num_items // num_steps
chunk_sizes[:num_items % num_steps] += 1
chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)
chunk_offsets[0] = 0
return chunk_sizes, chunk_offsets
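# Example (assumption: illustrative values): get_chunks(10, 3) returns
# chunk_sizes == [4, 3, 3] and chunk_offsets == [0, 4, 7] -- the first
# num_items % num_steps chunks absorb the remainder, and the offsets are the
# cumulative sums shifted right by one.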
def barrier_sleep(mpi_comm=comm, tag=1747362612, sleep=0.1, use_yield=False):
"""
As suggested by Lisandro Dalcin at:
https://groups.google.com/forum/?fromgroups=#!topic/mpi4py/nArVuMXyyZI
"""
size = mpi_comm.Get_size()
if size == 1:
return
rank = mpi_comm.Get_rank()
mask = 1
while mask < size:
dst = (rank + mask) % size
src = (rank - mask + size) % size
req = mpi_comm.isend(None, dst, tag)
while not mpi_comm.Iprobe(src, tag):
if use_yield:
yield False
time.sleep(sleep)
mpi_comm.recv(None, src, tag)
req.Wait()
mask <<= 1
if use_yield:
yield True
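# Note: because of the yield statements, barrier_sleep() always returns a
# generator, even when use_yield=False; it does nothing until consumed, e.g.:
#   for _ in barrier_sleep():
#       pass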
|
YaleDHLab/image-segmentation
|
yale_daily_news/segment_ydn_images.py
|
Python
|
mit
| 24,116
| 0.011403
|
from __future__ import division
from multiprocessing import Pool
from collections import defaultdict
from skimage import io
from scipy import ndimage
from shutil import Error, move, rmtree
import numpy as np
import glob, os, codecs, sys, json
'''
## Processing notes
The Alto XML structure is as follows:
Each `root_dir` has multiple `issues`.
Each `issue` has multiple `pages`.
Each `page` has multiple `articles`.
Each `article` has multiple `images`.
Each image can belong to one page (if the article fits entirely on one page)
or multiple pages (if the article spans multiple pages).
'''
######################################
# Convert jp2 images to numpy arrays #
######################################
def convert_jp2_images_to_numpy_arrays(root_data_directory):
'''
Read in the path to a directory that contains subdirectories
for each newspaper issue, iterate over the jp2 files
in those directories and write each to disk as a numpy array
'''
# each image file in the issue_directory contains a single page of the newspaper
issue_directories = get_issue_directories(root_data_directory)
# iterate over each issue directory
for issue_directory in issue_directories:
# find all jp2 images within the current issue directory
issue_images = get_images_in_directory(issue_directory)
# create process pool using all available cpu processors
pool_one = Pool(n_processes)
# use the process pool to convert each jp2 image to a numpy array
for result in pool_one.imap(image_path_to_npy, issue_images):
pass
pool_one.close()
pool_one.join()
def get_issue_directories(directory_with_issue_directories):
'''
Read in the path to a directory that contains a series of
subdirectories, each of which should contain one or more files
for the images/pages in that issue of the paper. Return an array
of all of the issue subdirectories
'''
return glob.glob(directory_with_issue_directories + '/*')[:max_files_to_process]
def get_images_in_directory(path_to_directory):
'''
Read in a path to a directory and return an array of jp2
files in that directory
'''
return list( glob.glob(path_to_directory + '/*.jp2')[:max_files_to_process] )
def image_path_to_npy(path_to_jp2_image):
'''
Read in the full path to a jp2 image, read that image into memory,
convert to a numpy array and save as a npy file
'''
jp2_array = jp2_path_to_array(path_to_jp2_image)
if jp2_array is not None:
write_jp2_array_to_disk(jp2_array, path_to_jp2_image)
def jp2_path_to_array(path_to_jp2_file):
'''
Read in the path to a jp2 image file and return that
file as a numpy array that represents the pixel values of
that image
'''
# try to read the numpy array into memory from a npy file.
# if the image hasn't been converted yet, convert it and then write it
# at the end of this loop, to save on i/o the next time we process this file
try:
jp2_issue_directory = path_to_jp2_file.split('/')[-2]
jp2_basename = os.path.basename(path_to_jp2_file)
path_to_saved_numpy_array = './numpy_arrays/' + jp2_issue_directory
path_to_saved_numpy_array += '/' + jp2_basename + '.npy'
jp2_array = np.load(path_to_saved_numpy_array)
if verbosity_level > 0:
print 'read the following image from disk', path_to_jp2_file
return jp2_array
# if an exception arises, then read the image from disk and write its npy file
except Exception as exc:
try:
jp2_array = io.imread(path_to_jp2_file, plugin='freeimage')
write_jp2_array_to_disk(jp2_array, path_to_jp2_file)
return jp2_array
except Exception:
with open('unprocessable-images.txt', 'a') as out:
out.write(path_to_jp2_file + '\n')
def write_jp2_array_to_disk(jp2_array, jp2_path):
'''
Read in a numpy array and the path to the jp2 file, and
write that numpy array to disk in a directory with the same
name as the issue subdirectory from which the image was read
'''
jp2_filename = os.path.basename(jp2_path)
jp2_issue_directory = jp2_path.split('/')[-2]
out_directory = 'numpy_arrays/' + jp2_issue_directory + '/'
out_path = out_directory + jp2_filename + '.npy'
if not os.path.exists(out_directory):
os.makedirs(out_directory)
np.save(out_path, jp2_array)
######################
# XML Helper Methods #
######################
def get_page_mappings(issue_directory):
'''
In 1.articles.xml (e.g.) there are <coord> elements with
inpage attributes:
<page id="1" {...} unit="pixel">
<article>
<id>DIVL11</id>
<title></title>
<type>ARTICLE</type>
<clip type="normal">
<coord inpage="1">425:619:210:20</coord> <- inpage attribute
<coord inpage="1">185:666:687:202</coord>
<coord inpage="1">178:915:668:47</coord>
</clip>
{...}
</page>
These inpage values refer to the id attribute of the page
on which the given rect appears.
With some issue directories, there's a file /index.cpd that lists
the pages in 1-based index positions:
<?xml version="1.0"?>
<cpd>
<type>Document</type>
<page>
<pagetitle>Page 1</pagetitle>
<pagefile>1.jp2</pagefile>
<pageptr>+</pageptr>
</page>
<page>
<pagetitle>Page 2</pagetitle>
<pagefile>5.jp2</pagefile>
<pageptr>+</pageptr>
</page>
</cpd>
Here 1.jp2 is page id 1, 5.jp2 is page id 2, and so on. In all observed cases,
the image ids are sequential and they identify the images when those image
filenames are sorted numerically (e.g. 1.jp2,6.jp2,28.jp2...). Use this insight
to create the mappings from image filename to image id.
Return a mapping from page name to page id
and a mapping from page id to page name.
'''
page_file_to_page_id = {}
page_id_to_page_file = {}
images = glob.glob(issue_directory + '/*.jp2')
image_numbers = []
for i in images:
basename = os.path.basename(i)
image_number = int(basename.replace('.jp2',''))
image_numbers.append(image_number)
image_numbers.sort()
page_number = 1
for i in image_numbers:
for j in images:
image_filename = os.path.basename(j)
if str(i) + '.jp2' == image_filename:
# store the 1-based index position of this page image
page_file_to_page_id[image_filename] = page_number
page_id_to_page_file[page_number] = image_filename
page_number += 1
return page_id_to_page_file, page_file_to_page_id
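# Example of the mapping built above (assumption: illustrative filenames):
# for an issue directory containing 1.jp2, 5.jp2 and 28.jp2, the sorted image
# numbers give page_file_to_page_id == {'1.jp2': 1, '5.jp2': 2, '28.jp2': 3}
# and page_id_to_page_file as its inverse.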
def get_article_xml_files(issue_directory):
'''
Read in the path to a directory with files for a single issue and
return an array of article xml files within that directory's issue
'''
return glob.glob(issue_directory + '/*.articles.xml')
def read_xml_file(xml_file_path):
'''
Read in the path to an xml file and return that
xml file content in string form
'''
with codecs.open(xml_file_path, 'r', 'utf-8') as f:
return f.read()
def get_xml_articles(xml_content):
'''
    Read in a string containing XML content and return an array
    of the articles in that XML document
'''
articles = []
    for i in xml_content.split('<article')[1:]:
article_start = '>'.join(i.split('>')[1:])
article_content = article_start.split('</article')[0]
articles.append(article_content)
return articles
def get_article_clips(article, restrict_to_uc=1):
'''
Read in the xml content from an article and return an array of
the clips in that article. If restrict_to_uc == 1, only return
clips if they have type 'uc'
'''
article_clips = []
for i in article.split('<clip')[1:]:
if restrict_to_uc == 1:
if 'type="uc"' not in i:
continue
clip_start = '>'.join(i.split('>')[1:])
clip_content = clip_start.split('</clip')[0]
article_clips.append(clip_content)
return article_clips
def get_clip_coords(article_clip):
'''
Read in the clip content of a jp2 xml file and return an array of the
coord elements within that clip element
'''
clip_coords = []
for i in article_clip.split('\n')[1:-1]:
clip_coords.append(i.replace('\r',''))
return clip_coords
def get_coordinate_array(coord_element):
'''
|
diefenbach-fz/lfs-downloadable-products
|
lfs_downloadable_products/templatetags/lfs_downloadable_products_tags.py
|
Python
|
bsd-3-clause
| 954
| 0.002096
|
# django imports
from django.contrib.sites.models import Site
from django.template import Library
from django.utils.safestring import mark_safe
from lfs_downloadable_products import views
register = Library()
@register.simple_tag(takes_context=True)
def manage_attachments(context, product):
request = context.get('request', None)
result = views.manage_attachments(request, product, True)
return mark_safe(result)
@register.inclusion_tag('lfs_downloadable_products/display_attachments.html', takes_context=True)
def downloadable_attachments(context, order):
    from lfs_downloadable_products.models import ProductUrl
urls = []
exists = {}
    for url in ProductUrl.objects.filter(order=order).order_by("-creation_date"):
if url.attachment.id not in exists:
urls.append(url)
exists[url.attachment.id] = 1
return {
"urls": urls,
"domain": Site.objects.get_current().domain,
}
|
morenopc/edx-platform
|
lms/djangoapps/django_comment_client/forum/views.py
|
Python
|
agpl-3.0
| 18,118
| 0.004029
|
import json
import logging
import xml.sax.saxutils as saxutils
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.core.context_processors import csrf
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_GET
import newrelic.agent
from edxmako.shortcuts import render_to_response
from courseware.courses import get_course_with_access
from course_groups.cohorts import (is_course_cohorted, get_cohort_id, is_commentable_cohorted,
get_cohorted_commentables, get_course_cohorts, get_cohort_by_id)
from courseware.access import has_access
from django_comment_client.permissions import cached_has_permission
from django_comment_client.utils import (merge_dict, extract, strip_none, add_courseware_context)
import django_comment_client.utils as utils
import lms.lib.comment_client as cc
from xmodule.modulestore.locations import SlashSeparatedCourseKey
THREADS_PER_PAGE = 20
INLINE_THREADS_PER_PAGE = 20
PAGES_NEARBY_DELTA = 2
escapedict = {'"': '"'}
log = logging.getLogger("edx.discussions")
@newrelic.agent.function_trace()
def get_threads(request, course_id, discussion_id=None, per_page=THREADS_PER_PAGE):
"""
This may raise an appropriate subclass of cc.utils.CommentClientError
if something goes wrong.
"""
default_query_params = {
'page': 1,
'per_page': per_page,
'sort_key': 'date',
'sort_order': 'desc',
'text': '',
'commentable_id': discussion_id,
'course_id': course_id.to_deprecated_string(),
'user_id': request.user.id,
}
if not request.GET.get('sort_key'):
# If the user did not select a sort key, use their last used sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.retrieve()
# TODO: After the comment service is updated this can just be user.default_sort_key because the service returns the default value
default_query_params['sort_key'] = cc_user.get('default_sort_key') or default_query_params['sort_key']
else:
# If the user clicked a sort key, update their default sort key
cc_user = cc.User.from_django_user(request.user)
cc_user.default_sort_key = request.GET.get('sort_key')
cc_user.save()
    # There are two dimensions to consider when executing a search with respect to group id:
    #   - is the user a moderator?
    #   - did the user request a group?
    # If the user requested a group explicitly, give them that group; otherwise,
    # a moderator sees all, and a student sees their cohort.
group_id = request.GET.get('group_id')
if group_id == "all":
group_id = None
if not group_id:
if not cached_has_permission(request.user, "see_all_cohorts", course_id):
group_id = get_cohort_id(request.user, course_id)
if group_id:
default_query_params["group_id"] = group_id
    # so by default, a moderator sees all items, and a student sees their cohort
query_params = merge_dict(default_query_params,
strip_none(extract(request.GET,
['page', 'sort_key',
'sort_order', 'text',
'commentable_ids', 'flagged'])))
threads, page, num_pages = cc.Thread.search(query_params)
#now add the group name if the thread has a group id
for thread in threads:
if thread.get('group_id'):
thread['group_name'] = get_cohort_by_id(course_id, thread.get('group_id')).name
thread['group_string'] = "This post visible only to Group %s." % (thread['group_name'])
else:
thread['group_name'] = ""
thread['group_string'] = "This post visible to everyone."
#patch for backward compatibility to comments service
        if 'pinned' not in thread:
thread['pinned'] = False
query_params['page'] = page
query_params['num_pages'] = num_pages
return threads, query_params
@login_required
def inline_discussion(request, course_id, discussion_id):
"""
Renders JSON for DiscussionModules
"""
nr_transaction = newrelic.agent.current_transaction()
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load_forum', course_id)
threads, query_params = get_threads(request, course_id, discussion_id, per_page=INLINE_THREADS_PER_PAGE)
cc_user = cc.User.from_django_user(request.user)
user_info = cc_user.to_dict()
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_id, threads, request.user, user_info)
allow_anonymous = course.allow_anonymous
allow_anonymous_to_peers = course.allow_anonymous_to_peers
#since inline is all one commentable, only show or allow the choice of cohorts
#if the commentable is cohorted, otherwise everything is not cohorted
#and no one has the option of choosing a cohort
is_cohorted = is_course_cohorted(course_id) and is_commentable_cohorted(course_id, discussion_id)
is_moderator = cached_has_permission(request.user, "see_all_cohorts", course_id)
cohorts_list = list()
if is_cohorted:
cohorts_list.append({'name': _('All Groups'), 'id': None})
#if you're a mod, send all cohorts and let you pick
if is_moderator:
cohorts = get_course_cohorts(course_id)
for cohort in cohorts:
cohorts_list.append({'name': cohort.name, 'id': cohort.id})
else:
#students don't get to choose
cohorts_list = None
return utils.JsonResponse({
'discussion_data': map(utils.safe_content, threads),
'user_info': user_info,
'annotated_content_info': annotated_content_info,
'page': query_params['page'],
'num_pages': query_params['num_pages'],
'roles': utils.get_role_ids(course_id),
'allow_anonymous_to_peers': allow_anonymous_to_peers,
'allow_anonymous': allow_anonymous,
'cohorts': cohorts_list,
'is_moderator': is_moderator,
'is_cohorted': is_cohorted
})
@login_required
def forum_form_discussion(request, course_id):
"""
Renders the main Discussion page, potentially filtered by a search query
"""
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
nr_transaction = newrelic.agent.current_transaction()
    course = get_course_with_access(request.user, 'load_forum', course_id)
with newrelic.agent.FunctionTrace(nr_transaction, "get_discussion_category_map"):
category_map = utils.get_discussion_category_map(course)
try:
unsafethreads, query_params = get_threads(request, course_id) # This might process a search query
        threads = [utils.safe_content(thread) for thread in unsafethreads]
except cc.utils.CommentClientMaintenanceError:
log.warning("Forum is in maintenance mode")
return render_to_response('discussion/maintenance.html', {})
user = cc.User.from_django_user(request.user)
user_info = user.to_dict()
with newrelic.agent.FunctionTrace(nr_transaction, "get_metadata_for_threads"):
annotated_content_info = utils.get_metadata_for_threads(course_id, threads, request.user, user_info)
with newrelic.agent.FunctionTrace(nr_transaction, "add_courseware_context"):
add_courseware_context(threads, course)
if request.is_ajax():
return utils.JsonResponse({
'discussion_data': threads, # TODO: Standardize on 'discussion_data' vs 'threads'
'annotated_content_info': annotated_content_info,
'num_pages': query_params['num_pages'],
'page': query_params['page'],
})
else:
with newrelic.agent.FunctionTrace(nr_transaction, "get_cohort_info"):
cohorts = get_course_cohorts(course_id)
cohorted_commentables = get_cohorted_co
|
moonbury/notebooks
|
github/MatplotlibCookbook/Chapter 6/06.py
|
Python
|
gpl-3.0
| 748
| 0.025401
|
import numpy
from matplotlib import pyplot as plot
import matplotlib.cm as cm
def iter_count(C, max_iter):
X = C
for n in range(max_iter):
if abs(X) > 2.:
return n
X = X ** 2 + C
return max_iter
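# Quick sanity checks (assumption: illustrative values): iter_count(0j, 64)
# returns 64 (the origin never escapes), while iter_count(2 + 2j, 64) returns 0
# (|2 + 2j| > 2 immediately).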
N = 512
max_iter = 64
xmin, xmax, ymin, ymax = -0.32, -0.22, 0.8, 0.9
X = numpy.linspace(xmin, xmax, N)
Y = numpy.linspace(ymin, ymax, N)
Z = numpy.empty((N, N))
for i, y in enumerate(Y):
for j, x in enumerate(X):
Z[i, j] = iter_count(complex(x, y), max_iter)
plot.imshow(Z,
cmap = cm.binary,
interpolation = 'bicubic',
origin = 'lower',
extent=(xmin, xmax, ymin, ymax))
levels = [8, 12, 16, 20]
ct = plot.contour(X, Y, Z, levels, cmap = cm.gray)
plot.clabel(ct, fmt='%d')
plot.show()
|
Spirotot/py3status
|
py3status/modules/vnstat.py
|
Python
|
bsd-3-clause
| 4,225
| 0.000237
|
# -*- coding: utf-8 -*-
"""
Display vnstat statistics.
Coloring rules.
If the value is bigger than a dict key, the status string will turn to the
color specified in the value.
Example:
coloring = {
800: "#dddd00",
900: "#dd0000",
}
(0 - 800: white, 800-900: yellow, >900 - red)
Format of status string placeholders:
{down} download
{total} total
{up} upload
Requires:
- external program called `vnstat` installed and configured to work.
@author shadowprince
@license Eclipse Public License
"""
from __future__ import division # python2 compatibility
from time import time
from subprocess import check_output
def get_stat(statistics_type):
""
|
"
Get stati
|
stics from devfile in list of lists of words
"""
def filter_stat():
out = check_output(["vnstat", "--dumpdb"]).decode("utf-8").splitlines()
for x in out:
if x.startswith("{};0;".format(statistics_type)):
return x
try:
type, number, ts, rxm, txm, rxk, txk, fill = filter_stat().split(";")
except OSError as e:
print("Looks like you haven't installed or configured vnstat!")
raise e
except ValueError:
err = "vnstat returned wrong output, "
err += "maybe it's configured wrong or module is outdated"
raise RuntimeError(err)
up = (int(txm) * 1024 + int(txk)) * 1024
down = (int(rxm) * 1024 + int(rxk)) * 1024
return {
"up": up,
"down": down,
"total": up+down
}
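# Example of the byte arithmetic above (assumption: illustrative values): for
# txm == 1 and txk == 512, up == (1 * 1024 + 512) * 1024 == 1572864 bytes,
# i.e. 1.5 MiB reconstructed from the MiB/KiB pair in the vnstat dump.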
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 180
coloring = {}
format = "{total}"
# initial multiplier, if you want to get rid of first bytes, set to 1 to
# disable
initial_multi = 1024
left_align = 0
# if value is greater, divide it with unit_multi and get next unit from
# units
multiplier_top = 1024
precision = 1
statistics_type = "d" # d for daily, m for monthly
unit_multi = 1024 # value to divide if rate is greater than multiplier_top
def __init__(self, *args, **kwargs):
"""
Format of total, up and down placeholders under FORMAT.
As default, substitutes left_align and precision as %s and %s
Placeholders:
value - value (float)
unit - unit (string)
"""
self.last_stat = get_stat(self.statistics_type)
self.last_time = time()
self.last_interface = None
self.value_format = "{value:%s.%sf} {unit}" % (
self.left_align, self.precision
)
# list of units, first one - value/initial_multi, second - value/1024,
# third - value/1024^2, etc...
self.units = ["kb", "mb", "gb", "tb", ]
def _divide_and_format(self, value):
"""
Divide a value and return formatted string
"""
value /= self.initial_multi
for i, unit in enumerate(self.units):
if value > self.multiplier_top:
value /= self.unit_multi
else:
break
return self.value_format.format(value=value, unit=unit)
def currentSpeed(self, i3s_output_list, i3s_config):
stat = get_stat(self.statistics_type)
color = None
keys = list(self.coloring.keys())
keys.sort()
for k in keys:
if stat["total"] < k * 1024 * 1024:
break
else:
color = self.coloring[k]
response = {
'cached_until': time() + self.cache_timeout,
'full_text': self.format.format(
total=self._divide_and_format(stat['total']),
up=self._divide_and_format(stat['up']),
down=self._divide_and_format(stat['down']),
),
'transformed': True
}
if color:
response["color"] = color
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
config = {
'color_good': '#00FF00',
'color_bad': '#FF0000',
}
while True:
print(x.currentSpeed([], config))
sleep(1)
|
chilleo/ALPHA
|
CommandLineFiles/RunDGEN.py
|
Python
|
mit
| 100,444
| 0.004888
|
import re
import os
import itertools
import time
from string import upper
import ete3
import copy
import subprocess
from collections import defaultdict
from sys import platform
from scipy import stats
from ete3 import Tree
from natsort import natsorted
from Bio import AlignIO
"""
Functions:
~
Chabrielle Allen
Travis Benedict
Peter Dulworth
"""
def run_saved_dgen(stat_file,sequence_files,window_size=999999999999999999999999999999,
window_offset=999999999999999999999999999999, verbose=False, alpha=0.01,
plot=False, meta=False):
"""
Creates a network tree based on the species tree
and the two leaves to be connected.
Inputs:
inheritance --- inputted tuple containing inheritance probability ex. (0.7, 0.3)
species_tree --- generated or inputted file or newick string
network_map --- inputted mapping of leaves where nodes will be added
Output:
network --- a newick string network with the added nodes.
"""
    # rename for clarity
alignments = sequence_files
# read in dgen stat from file
# (have to wait for file to exist sometimes)
while not os.path.exists(stat_file):
time.sleep(1)
with(open(stat_file, "r")) as s:
lines = s.readlines()
taxa = eval(lines[0].split(None, 1)[1])
stat_species_tree = lines[1].split(None, 2)[2].replace("\n", "")
stat_species_network = lines[2].split(None, 2)[2].replace("\n", "")
outgroup = lines[3].split(None, 1)[1].replace("\n", "")
invariants = []
for oneInvLine in range(4,len(lines)):
this_line_invariant_group = eval(lines[oneInvLine].split(None, 6)[6])
invariants.append(this_line_invariant_group)
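    # Expected stat_file layout, as implied by the parsing above (assumption:
    # whitespace-separated fields):
    #   line 0: <label> <python list of taxa>          -> eval'd into taxa
    #   line 1: <label> <label> <species tree string>  -> stat_species_tree
    #   line 2: <label> <label> <species network>      -> stat_species_network
    #   line 3: <label> <outgroup name>                -> outgroup
    #   lines 4+: six fields, then a python literal    -> appended to invariants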
#increase = eval(lines[1].split(None, 2)[2])
#decrease = eval(lines[2].split(None, 2)[2])
#increase_resized = increase
#decrease_resized = decrease
#overall_coefficient = 1
#patterns_to_coeff = {}
# DONE READING IN STATISTIC FROM FILE, RUN THE STAT
#window_size = 5000
#window_offset = 5000
#verbose = True
#alpha = 0.01
#alignments.append(sequence_file)
alignments_to_windows_to_DGEN = calculate_windows_to_DGEN(alignments, taxa, outgroup,
invariants, window_size,
window_offset, verbose, alpha)
# the lazy way to do alignments to d using same function and not having to rewrite a nearly identical function
alignments_to_DGEN = calculate_windows_to_DGEN(alignments, taxa, outgroup,
invariants, 999999999999999999999999999999,
999999999999999999999999999999, verbose, alpha)
for alignment in alignments:
alignments_to_DGEN[alignment] = alignments_to_DGEN[alignment][0]
# print stuff
s = ""
for alignment in alignments:
if verbose:
dgen2_dof, significant_dgen, dgen2_num_ignored, dgen2_chisq, l_pval_dgen = alignments_to_DGEN[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "(format = degrees of freedom, is significant?, num. of sites ignored, chi squared value, DGEN p value)"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_DGEN[alignment]) + "\n"
s += "\n"
s += "Final Overall DGEN p value: {0}".format(l_pval_dgen) + "\n"
s += "Significant p value: {0}".format(significant_dgen) + "\n"
s += "\n"
s += "(Verbose) Number Of Sites Ignored: {0}".format(dgen2_num_ignored) + "\n"
s += "(Verbose) Degrees Of Freedom: {0}".format(dgen2_dof) + "\n"
s += "(Verbose) ChiSquared Value: {0}".format(dgen2_chisq) + "\n"
s += "\n"
s += "For easy plotting of DGEN values:"
s += "\n"
windowIndex = 0
for windowEntryIndex in alignments_to_windows_to_DGEN[alignment]:
s += str(windowIndex) + "," + str(alignments_to_windows_to_DGEN[alignment][windowEntryIndex][4]) + "\n"
windowIndex += 1
else:
l_pval_dgen, significant_dgen = alignments_to_DGEN[alignment]
s += "\n"
s += alignment + ": " + "\n"
s += "\n"
s += "(format = DGEN p value, is significant?)"
s += "\n"
s += "Windows to D value: " + str(alignments_to_windows_to_DGEN[alignment]) + "\n"
s += "\n"
s += "Final Overall DGEN p value: {0}".format(l_pval_dgen) + "\n"
s += "Significant p value: {0}".format(significant_dgen) + "\n"
# finally do one more output of just window#,dgen val for easy plotting
s += "\n"
s += "For easy plotting of DGEN values:"
s += "\n"
windowIndex = 0
for windowEntryIndex in alignments_to_windows_to_DGEN[alignment]:
s += str(windowIndex) + "," + str(alignments_to_windows_to_DGEN[alignment][windowEntryIndex][0]) + "\n"
windowIndex += 1
print s
if plot:
        plot_formatting((alignments_to_DGEN, alignments_to_windows_to_DGEN), plot, meta)
return s
def calculate_windows_to_DGEN(alignments, taxa_order, outgroup, list_of_tree_and_net_invariants, window_size, window_offset,
verbose= False, alpha=0.01):
"""
Calculates the DGEN statistic for the given alignment
Input:
alignment --- a sequence alignment in phylip format
taxa_order --- the desired order of the taxa
patterns_of_interest --- a tuple containing the sets of patterns used for determining a statistic
window_size --- the desired window size
windw_offset --- the desired offset between windows
Output:
l_stat --- the L statistic value
windows_to_l --- a mapping of window indices to L statistic values
"""
# create a map that will map from all the patterns we care about to their counts
pattern_count_map = defaultdict(int)
for aLine in list_of_tree_and_net_invariants:
for aPatternGroup in aLine: # technically could skip the first one or just use the first one
for aPattern in aPatternGroup:
pattern_count_map[aPattern] = 0
# Separate the patterns of interest into their two terms
#terms1 = patterns_of_interest[0]
#terms2 = patterns_of_interest[1]
alignments_to_windows_to_d = {}
for alignment in alignments:
sequence_list = []
taxon_list = []
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Add each taxon to a list
taxon = line.split()[0]
taxon_list.append(taxon)
i = 0
num_windows = 0
if window_size > length_of_sequences:
num_windows = 1
window_size = length_of_sequences
else:
# Determine the total number of windows needed
while i + window_size - 1 < length_of_sequences:
i += window_offset
num_windows += 1
site_idx = 0
windows_to_l = {}
# Iterate over each window
for window in range(num_windows):
terms1_counts = defaultdict(int)
terms2_counts = defaultdict(int)
num_ignored = 0
# Iterate over the indices in each window
for window_idx in range(window_size):
# Map each taxa to the base at a given site
taxa_to_site = {}
# Create a set of the bases at a given site to determine if the site is biallelic
|
camilonova/django
|
tests/gis_tests/test_data.py
|
Python
|
bsd-3-clause
| 2,588
| 0.000386
|
"""
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import json
import os
from django.utils.functional import cached_property
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(__file__), 'data')
def tuplize(seq):
"Turn all nested sequences to tuples in given sequence."
if isinstance(seq, (list, tuple)):
return tuple(tuplize(i) for i in seq)
return seq
def strconvert(d):
"Converts all keys in dictionary to str type."
return {str(k): v for k, v in d.items()}
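# Examples (assumption: illustrative values):
#   tuplize([[1, 2], [3, 4]]) == ((1, 2), (3, 4))
#   strconvert({u'name': 1}) == {'name': 1}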
def get_ds_file(name, ext):
return os.path.join(TEST_DATA,
name,
name + '.%s' % ext
)
class TestObj:
"""
Base testing object, turns keyword args into attributes.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class TestDS(TestObj):
"""
Object for testing GDAL data sources.
"""
def __init__(self, name, *, ext='shp', **kwargs):
# Shapefile is default extension, unless specified otherwise.
self.ds = get_ds_file(name, ext)
super().__init__(**kwargs)
class TestGeom(TestObj):
"""
Testing object used for wrapping reference geometry data
in GEOS/GDAL tests.
"""
def __init__(self, *, coords=None, centroid=None, ext_ring_cs=None, **kwargs):
# Converting lists to tuples of certain keyword args
        # so coordinate test cases will match (JSON has no
# concept of tuple).
if coords:
self.coords = tuplize(coords)
if centroid:
self.centroid = tuple(centroid)
if ext_ring_cs:
ext_ring_cs = tuplize(ext_ring_cs)
self.ext_ring_cs = ext_ring_cs
super().__init__(**kwargs)
class TestGeomSet:
"""
Each attribute of this object is a list of `TestGeom` instances.
"""
    def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, [TestGeom(**strconvert(kw)) for kw in value])
class TestDataMixin:
"""
Mixin used for GEOS/GDAL test cases that defines a `geometries`
property, which returns and/or loads the reference geometry data.
"""
@cached_property
def geometries(self):
# Load up the test geometry data from fixture into global.
with open(os.path.join(TEST_DATA, 'geometries.json')) as f:
geometries = json.load(f)
return TestGeomSet(**strconvert(geometries))
|
mbonix/yapr
|
android/PyLocale.py
|
Python
|
mit
| 2,023
| 0.002472
|
""" A location-aware script to manage ringer volume """
__author__ = 'Marco Bonifacio <bonifacio.marco@gmail.com>'
__license__ = 'MIT License'
import android
import time
# Parameters
SSID = {'bonix-lan': 'home',
        'ZENIT SECURED WPA': 'work'}
RINGER = {'home': 5,
          'work': 2,
          'unknown': 5}
# Functions
def check_ssid(droid):
""" Check if wireless network SSID is known.
Args:
droid: an Android instance.
Returns:
a string representing a known or unknown
|
environment. """
state = 'sconosciuto'
try:
lwifi = droid.wifiGetScanResults().result
lssid = [w['ssid']for w in lwifi]
for s in lssid:
if s in SSID:
state = SSID[s]
except Exception, e:
droid.notify('PyLocale', 'Errore: {}'.format(e))
finally:
return(state)
def check_state(droid, state, stateold):
""" Check if environment has changed.
Args:
droid: an Android instance.
state: a string, the present state.
stateold: a string, the former state.
Returns:
a binary true if environment has changed. """
if state != stateold:
droid.vibrate()
        if state != 'unknown':
            droid.makeToast('You are at {}'.format(state))
        else:
            droid.makeToast('You left {}'.format(stateold))
return(True)
else:
return(False)
def set_ringer(droid, state):
""" Set the ringer volume depending on state.
Args:
droid: an Android instance.
state: a string, the present state.
Returns:
nothing. """
droid.setRingerVolume(RINGER[state])
droid.makeToast('Volume: {}'.format(RINGER[state]))
if __name__ == '__main__':
droid = android.Android()
    state = 'unknown'
while True:
stateold = state
state = check_ssid(droid)
changed = check_state(droid, state, stateold)
if changed is True:
set_ringer(droid, state)
time.sleep(300)
|
sociam/indx
|
apps/fitbit/bin/fitbit/fitbit_intraday.py
|
Python
|
agpl-3.0
| 3,324
| 0.007822
|
from fitbit import Fitbit
from datetime import date, datetime, time, timedelta
import json
# '''merges two response dicts based on the keys'''
# def combine(a, b):
# c = {}
# for key in a.keys():
# if (key.endswith('-intraday')):
# c[key] = a[key]
# c[key]['dataset'].extend(b[key]['dataset'])
# else:
# c[key] = a[key]
# c[key].extend(b[key])
# return c
class FitbitIntraDay():
    def __init__(self, fitbit):
        self.fitbit = fitbit
        if self.fitbit.token is None:
            self.fitbit.get_token()
def get_intraday_time_series(self, resource_path, from_datetime, to_datetime, format='json'):
# from_date and to_date of type datetime
# use fitbit_timeseries helper functions for a list of possible resource paths
if from_datetime is None:
return []
        if to_datetime is None:
            return [self.get_day_time_series(resource_path, from_datetime.date(), format)]
if (to_datetime.date() == from_datetime.date()):
return [self.get_time_interval_time_series(resource_path, to_datetime.date(), from_datetime.time(), to_datetime.time(), format)]
else:
out = [self.get_time_interval_time_series(resource_path, from_datetime.date(), from_datetime.time(), time(23, 59), format)]
delta = timedelta(days=1)
next = from_datetime.date() + delta
while (next < to_datetime.date()):
out.append(self.get_day_time_series(resource_path, next, format))
next = next + delta
out.append(self.get_time_interval_time_series(resource_path, to_datetime.date(), time(0, 0), to_datetime.time(), format))
return out
    def get_day_time_series(self, resource_path, date=date.today(), format='json'):
        # note: the date=date.today() default is evaluated once, at class definition time
url = "/1/user/-/{0}/date/{1}/1d/1min.{2}".format(resource_path, date.isoformat(), format)
data = self.fitbit.call_get_api(url)
return json.loads(data)
def get_time_interval_time_series(self, resource_path, date=date.today(), from_time=time(0, 0), to_time=time(23,59), format='json'):
url = "/1/user/-/{0}/date/{1}/1d/1min/time/{2}/{3}.{4}".format(resource_path, date.isoformat(), from_time.strftime("%H:%M"), to_time.strftime("%H:%M"), format)
data = self.fitbit.call_get_api(url)
return json.loads(data)
def get_calories(self, from_datetime, to_datetime=None, format='json'):
return self.get_intraday_time_series("activities/calories", from_datetime, to_datetime, format)
def get_steps(self, from_datetime, to_datetime=None, format='json'):
return self.get_intraday_time_series("activities/steps", from_datetime, to_datetime, format)
def get_distance(self, from_datetime, to_datetime=None, format='json'):
return self.get_intraday_time_series("activities/distance", from_datetime, to_datetime, format)
def get_floors(self, from_datetime, to_datetime=None, format='json'):
return self.get_intraday_time_series("activities/floors", from_datetime, to_datetime, format)
def get_elevation(self, from_datetime, to_datetime=None, format='json'):
return self.get_intraday_time_series("activities/elevation", from_datetime, to_datetime, format)
|
joelagnel/ns-3
|
bindings/python/apidefs/gcc-LP64/ns3_module_energy.py
|
Python
|
gpl-2.0
| 54,425
| 0.016628
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## device-energy-model-container.h: ns3::DeviceEnergyModelContainer [class]
module.add_class('DeviceEnergyModelContainer')
## energy-model-helper.h: ns3::DeviceEnergyModelHelper [class]
module.add_class('DeviceEnergyModelHelper', allow_subclassing=True)
## energy-model-helper.h: ns3::EnergySourceHelper [class]
module.add_class('EnergySourceHelper', allow_subclassing=True)
## rv-battery-model-helper.h: ns3::RvBatteryModelHelper [class]
module.add_class('RvBatteryModelHelper', parent=root_module['ns3::EnergySourceHelper'])
## wifi-radio-energy-model-helper.h: ns3::WifiRadioEnergyModelHelper [class]
module.add_class('WifiRadioEnergyModelHelper', parent=root_module['ns3::DeviceEnergyModelHelper'])
## wifi-radio-energy-model.h: ns3::WifiRadioEnergyModelPhyListener [class]
module.add_class('WifiRadioEnergyModelPhyListener', parent=root_module['ns3::WifiPhyListener'])
## basic-energy-source-helper.h: ns3::BasicEnergySourceHelper [class]
module.add_class('BasicEnergySourceHelper', parent=root_module['ns3::EnergySourceHelper'])
## device-energy-model.h: ns3::DeviceEnergyModel [class]
module.add_class('DeviceEnergyModel', parent=root_module['ns3::Object'])
## energy-source.h: ns3::EnergySource [class]
module.add_class('EnergySource', parent=root_module['ns3::Object'])
## energy-source-container.h: ns3::EnergySourceContainer [class]
module.add_class('EnergySourceContainer', parent=root_module['ns3::Object'])
## li-ion-energy-source.h: ns3::LiIonEnergySource [class]
module.add_class('LiIonEnergySource', parent=root_module['ns3::EnergySource'])
## rv-battery-model.h: ns3::RvBatteryModel [class]
module.add_class('RvBatteryModel', parent=root_module['ns3::EnergySource'])
## simple-device-energy-model.h: ns3::SimpleDeviceEnergyModel [class]
module.add_class('SimpleDeviceEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
## wifi-radio-energy-model.h: ns3::WifiRadioEnergyModel [class]
module.add_class('WifiRadioEnergyModel', parent=root_module['ns3::DeviceEnergyModel'])
## basic-energy-source.h: ns3::BasicEnergySource [class]
module.add_class('BasicEnergySource', parent=root_module['ns3::EnergySource'])
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace dsdv
nested_module = module.add_cpp_namespace('dsdv')
register_types_ns3_dsdv(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_dsdv(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3DeviceEnergyModelContainer_methods(root_module, root_module['ns3::DeviceEnergyModelContainer'])
register_Ns3DeviceEnergyModelHelper_methods(root_module, root_module['ns3::DeviceEnergyModelHelper'])
register_Ns3EnergySourceHelper_methods(root_module, root_module['ns3::EnergySourceHelper'])
register_Ns3RvBatteryModelHelper_methods(root_module, root_module['ns3::RvBatteryModelHelper'])
register_Ns3WifiRadioEnergyModelHelper_methods(root_module, root_module['ns3::WifiRadioEnergyModelHelper'])
register_Ns3WifiRadioEnergyModelPhyListener_methods(root_module, root_module['ns3::WifiRadioEnergyModelPhyListener'])
register_Ns3BasicEnergySourceHelper_methods(root_module, root_module['ns3::BasicEnergySourceHelper'])
register_Ns3DeviceEnergyModel_methods(root_module, root_module['ns3::DeviceEnergyModel'])
register_Ns3EnergySource_methods(root_module, root_module['ns3::EnergySource'])
register_Ns3EnergySourceContainer_methods(root_module, root_module['ns3::EnergySourceContainer'])
register_Ns3LiIonEnergySource_methods(root_module, root_module['ns3::LiIonEnergySource'])
register_Ns3RvBatteryModel_methods(root_module, root_module['ns3::RvBatteryModel'])
register_Ns3SimpleDeviceEnergyModel_methods(root_module, root_module['ns3::SimpleDeviceEnergyModel'])
register_Ns3WifiRadioEnergyModel_methods(root_module, root_module['ns3::WifiRadioEnergyModel'])
register_Ns3BasicEnergySource_methods(root_module, root_module['ns3::BasicEnergySource'])
return
def register_Ns3DeviceEnergyModelContainer_methods(root_module, cls):
## device-energy-model-container.h: ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'arg0')])
## device-energy-model-container.h: ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer() [constructor]
cls.add_constructor([])
## device-energy-model-container.h: ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::Ptr<ns3::DeviceEnergyModel> model) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
## device-energy-model-container.h: ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(std::string modelName) [constructor]
cls.add_constructor([param('std::string', 'modelName')])
## device-energy-model-container.h: ns3::DeviceEnergyModelContainer::DeviceEnergyModelContainer(ns3::DeviceEnergyModelContainer const & a, ns3::DeviceEnergyModelContainer const & b) [constructor]
cls.add_constructor([param('ns3::DeviceEnergyModelContainer const &', 'a'), param('ns3::DeviceEnergyModelContainer const &', 'b')])
## device-energy-model-container.h: void ns3::DeviceEnergyModelContainer::Add(ns3::DeviceEnergyModelContainer container) [member function]
cls.add_method('Add',
'void',
[param('ns3::DeviceEnergyModelContainer', 'container')])
## device-energy-model-container.h: void ns3::DeviceEnergyModelContainer::Add(ns3::Ptr<ns3::DeviceEnergyModel> model) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::DeviceEnergyModel >', 'model')])
## device-energy-model-container.h: void ns3::DeviceEnergyModelContainer::Add(std::string modelName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'modelName')])
|
Nikea/VisTrails
|
vistrails/db/versions/v0_9_3/persistence/xml/auto_gen.py
|
Python
|
bsd-3-clause
| 66,772
| 0.005571
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""generated automatically by auto_dao.py"""
from vistrails.core.system import get_elementtree_library
from xml_dao import XMLDAO
from vistrails.db.versions.v0_9_3.domain import *
ElementTree = get_elementtree_library()
class DBPortSpecXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'portSpec':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('type', None)
type = self.convertFromStr(data, 'str')
data = node.get('spec', None)
spec = self.convertFromStr(data, 'str')
obj = DBPortSpec(id=id,
name=name,
                        type=type,
spec=spec)
        obj.is_dirty = False
return obj
def toXML(self, portSpec, node=None):
if node is None:
node = ElementTree.Element('portSpec')
# set attributes
node.set('id',self.convertToStr(portSpec.db_id, 'long'))
node.set('name',self.convertToStr(portSpec.db_name, 'str'))
node.set('type',self.convertToStr(portSpec.db_type, 'str'))
node.set('spec',self.convertToStr(portSpec.db_spec, 'str'))
return node
class DBModuleXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'module':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('cache', None)
cache = self.convertFromStr(data, 'int')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
data = node.get('namespace', None)
namespace = self.convertFromStr(data, 'str')
data = node.get('package', None)
package = self.convertFromStr(data, 'str')
data = node.get('version', None)
version = self.convertFromStr(data, 'str')
data = node.get('tag', None)
tag = self.convertFromStr(data, 'str')
location = None
functions = []
annotations = []
portSpecs = []
# read children
for child in node.getchildren():
if child.tag == 'location':
_data = self.getDao('location').fromXML(child)
location = _data
elif child.tag == 'function':
_data = self.getDao('function').fromXML(child)
functions.append(_data)
elif child.tag == 'annotation':
_data = self.getDao('annotation').fromXML(child)
annotations.append(_data)
elif child.tag == 'portSpec':
_data = self.getDao('portSpec').fromXML(child)
portSpecs.append(_data)
elif child.text is None or child.text.strip() == '':
pass
else:
print '*** ERROR *** tag = %s' % child.tag
obj = DBModule(id=id,
cache=cache,
name=name,
namespace=namespace,
package=package,
version=version,
tag=tag,
location=location,
functions=functions,
annotations=annotations,
portSpecs=portSpecs)
obj.is_dirty = False
return obj
def toXML(self, module, node=None):
if node is None:
node = ElementTree.Element('module')
# set attributes
node.set('id',self.convertToStr(module.db_id, 'long'))
node.set('cache',self.convertToStr(module.db_cache, 'int'))
node.set('name',self.convertToStr(module.db_name, 'str'))
node.set('namespace',self.convertToStr(module.db_namespace, 'str'))
node.set('package',self.convertToStr(module.db_package, 'str'))
node.set('version',self.convertToStr(module.db_version, 'str'))
node.set('tag',self.convertToStr(module.db_tag, 'str'))
# set elements
location = module.db_location
if location is not None:
childNode = ElementTree.SubElement(node, 'location')
self.getDao('location').toXML(location, childNode)
functions = module.db_functions
for function in functions:
childNode = ElementTree.SubElement(node, 'function')
self.getDao('function').toXML(function, childNode)
annotations = module.db_annotations
for annotation in annotations:
childNode = ElementTree.SubElement(node, 'annotation')
self.getDao('annotation').toXML(annotation, childNode)
portSpecs = module.db_portSpecs
for portSpec in portSpecs:
childNode = ElementTree.SubElement(node, 'portSpec')
self.getDao('portSpec').toXML(portSpec, childNode)
return node
class DBTagXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
def getDao(self, dao):
return self.daoList[dao]
def fromXML(self, node):
if node.tag != 'tag':
return None
# read attributes
data = node.get('id', None)
id = self.convertFromStr(data, 'long')
data = node.get('name', None)
name = self.convertFromStr(data, 'str')
obj = DBTag(id=id,
name=name)
obj.is_dirty = False
return obj
def toXML(self, tag, node=None):
if node is None:
node = ElementTree.Element('tag')
# set attributes
node.set('id',self.convertToStr(tag.db_id, 'long'))
node.set('name',self.convertToStr(tag.db_name, 'str'))
return node
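# A hedged usage sketch (illustration only, not generated by auto_dao.py):
# DBTagXMLDAOBase references no other DAOs, so an empty daoList suffices,
# and the constructor keywords mirror fromXML above.
_tag_dao = DBTagXMLDAOBase({})
_node = _tag_dao.toXML(DBTag(id=7, name='my tag'))
_roundtrip = _tag_dao.fromXML(_node)
assert _roundtrip.db_id == 7 and _roundtrip.db_name == 'my tag'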
class DBPortXMLDAOBase(XMLDAO):
def __init__(self, daoList):
self.daoList = daoList
    def getDao(self, dao):
        return self.daoList[dao]
|
jiadaizhao/LeetCode
|
1401-1500/1451-Rearrange Words in a Sentence/1451-Rearrange Words in a Sentence.py
|
Python
|
mit
| 513
| 0.001949
|
import collections
class Solution:
def arrangeWords(self, text: str) -> str:
words = text.split()
table = collections.defaultdict(list)
for word in words:
table[len(word)].append(word)
result = []
for key in sorted(table):
result.extend(table[key])
|
return ' '.join(result).capitalize()
# Sort is stable
class Solution2:
    def arrangeWords(self, text: str) -> str:
return ' '.join(sorted(text.split(), key=len)).capitalize()
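# A brief usage sketch: because Python's sort is stable, words of equal length
# keep their original relative order, exactly as the problem requires.
print(Solution2().arrangeWords("Leetcode is cool"))  # -> "Is cool leetcode"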
|
SoftwareDefinedBuildings/bw2-contrib
|
driver/emu2/driver.py
|
Python
|
gpl-3.0
| 2,380
| 0.007143
|
# EMU code from https://github.com/rainforestautomation/Emu-Serial-API
from emu import *
import sys
import json
import msgpack
from xbos import get_client
from bw2python.bwtypes import PayloadObject
import time
with open("params.json") as f:
try:
params = json.loads(f.read())
except ValueError as e:
print "Invalid parameter file"
sys.exit(1)
emu_instance = emu(params["port"])
emu_instance.start_serial()
# get network info
emu_instance.get_network_info()
while not hasattr(emu_instance, 'NetworkInfo'):
time.sleep(10)
macid = emu_instance.NetworkInfo.DeviceMacId
c = get_client(agent=params["agent"], entity=params["entity"])
PONUM = (2,0,9,1)
baseuri = params["baseuri"]
signaluri = "{0}/s.emu2/{1}/i.meter/signal/meter".format(baseuri, macid)
print ">",signaluri
def send_message(msg):
"""
msg has keys:
current_demand
current_price
current_tier
current_summation_delivered
current_summation_received
"""
po = PayloadObject(PONUM, None, msgpack.packb(msg))
c.publish(signaluri, payload_objects=(po,))
msg = {}
while True:
#print emu_instance.get_instantaneous_demand()
emu_instance.get_current_summation_delivered()
emu_instance.get_instantaneous_demand('Y')
emu_instance.get_current_price('Y')
time.sleep(10)
    msg['current_time'] = time.time()  # alternative: int(pc.TimeStamp) + epoch offset from 00:00:00 1 Jan 2000
# handle PriceCluster
if hasattr(emu_instance, "PriceCluster"):
pc = emu_instance.PriceCluster
print dir(emu_instance.PriceCluster)
msg['current_price'] = float(int(pc.Price, 16)) / (10**int(pc.TrailingDigits,16))
msg['current_tier'] = int(pc.Tier, 16)
# handle demand
if hasattr(emu_instance, "InstantaneousDemand"):
d = emu_instance.InstantaneousDemand
        msg['current_demand'] = int(d.Demand, 16)
print dir(emu_instance)
# handle current summation
if hasattr(emu_instance, "CurrentSummationDelivered"):
d = emu_instance.CurrentSummationDelivered
multiplier = int(d.Multiplier, 16)
divisor = float(int(d.Divisor, 16))
        msg['current_summation_delivered'] = int(d.SummationDelivered, 16) * multiplier / divisor
msg['current_summation_received'] = int(d.SummationReceived, 16) * multiplier / divisor
send_message(msg)
emu_instance.stop_serial()
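# A worked decoding example (hex strings hypothetical, format per the EMU-2
# responses handled above): Price='0x000004D2' (1234) with
# TrailingDigits='0x02' decodes to 1234 / 10**2 = 12.34.
_price = float(int('0x000004D2', 16)) / (10 ** int('0x02', 16))
assert _price == 12.34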
|
jolyonb/edx-platform
|
openedx/core/djangoapps/schedules/content_highlights.py
|
Python
|
agpl-3.0
| 4,066
| 0.000492
|
"""
Contains methods for accessing weekly course highlights. Weekly highlights are a
schedule experience built on the Schedules app.
"""
from __future__ import absolute_import
import logging
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from openedx.core.djangoapps.schedules.config import COURSE_UPDATE_WAFFLE_FLAG
from openedx.core.djangoapps.schedules.exceptions import CourseUpdateDoesNotExist
from openedx.core.lib.request_utils import get_request_or_stub
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
def course_has_highlights(course_key):
"""
Does the course have any highlights for any section/week in it?
This ignores access checks, since highlights may be lurking in currently
inaccessible content.
"""
try:
course = _get_course_with_highlights(course_key)
except CourseUpdateDoesNotExist:
return False
else:
highlights_are_available = any(
section.highlights
for section in course.get_children()
if not section.hide_from_toc
)
if not highlights_are_available:
log.error(
"Course team enabled highlights and provided no highlights."
)
return highlights_are_available
def get_week_highlights(user, course_key, week_num):
"""
Get highlights (list of unicode strings) for a given week.
week_num starts at 1.
Raises:
CourseUpdateDoesNotExist: if highlights do not exist for
the requested week_num.
"""
course_descriptor = _get_course_with_highlights(course_key)
course_module = _get_course_module(course_descriptor, user)
sections_with_highlights = _get_sections_with_highlights(course_module)
highlights = _get_highlights_for_week(
sections_with_highlights,
week_num,
course_key,
)
return highlights
def _get_course_with_highlights(course_key):
# pylint: disable=missing-docstring
if not COURSE_UPDATE_WAFFLE_FLAG.is_enabled(course_key):
raise CourseUpdateDoesNotExist(
u"%s Course Update Messages waffle flag is disabled.",
course_key,
)
course_descriptor = _get_course_descriptor(course_key)
if not course_descriptor.highlights_enabled_for_messaging:
raise CourseUpdateDoesNotExist(
u"%s Course Update Messages are disabled.",
course_key,
)
return course_descriptor
|
def _get_course_descriptor(course_key):
course_descriptor = modulestore().get_course(course_key, depth=1)
if course_descriptor is None:
raise CourseUpdateDoesNotExist(
u"Course {} not found.".format(course_key)
)
return course_descriptor
def _get_course_module(course_descriptor, user):
# Fake a request to fool parts of the courseware that want to inspect it.
request = get_request_or_stub()
request.user = user
|
# Now evil modulestore magic to inflate our descriptor with user state and
# permissions checks.
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_descriptor.id, user, course_descriptor, depth=1, read_only=True,
)
return get_module_for_descriptor(
user, request, course_descriptor, field_data_cache, course_descriptor.id, course=course_descriptor,
)
def _get_sections_with_highlights(course_module):
return [
section for section in course_module.get_children()
if section.highlights and not section.hide_from_toc
]
def _get_highlights_for_week(sections, week_num, course_key):
# assume each provided section maps to a single week
num_sections = len(sections)
if not (1 <= week_num <= num_sections):
raise CourseUpdateDoesNotExist(
u"Requested week {} but {} has only {} sections.".format(
week_num, course_key, num_sections
)
)
section = sections[week_num - 1]
return section.highlights
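# A hedged usage sketch (user and course_key are placeholders): callers are
# expected to handle CourseUpdateDoesNotExist, raised when the waffle flag is
# off, highlights are disabled, or week_num is out of range.
#
#   try:
#       highlights = get_week_highlights(user, course_key, week_num=1)
#   except CourseUpdateDoesNotExist:
#       highlights = []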
|
TerryHowe/ansible-modules-hashivault
|
ansible/modules/hashivault/hashivault_approle_role_get.py
|
Python
|
mit
| 1,659
| 0.001206
|
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_ut
|
ils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_get
version_added: "3.8.0"
short_description: Hashicorp Vault approle role get module
description:
    - Module to get an approle role from Hashicorp Vault.
options:
name:
description:
- role name.
mount_point:
description:
- mount point for role
default: approle
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_approle_role_get:
name: 'ashley'
register: 'vault_approle_role_get'
- debug: msg="Role is {{vault_approle_role_get.role}}"
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['mount_point'] = dict(required=False, type='str', default='approle')
module = hashivault_init(argspec)
result = hashivault_approle_role_get(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_approle_role_get(params):
name = params.get('name')
client = hashivault_auth_client(params)
result = client.get_role(name, mount_point=params.get('mount_point'))
return {'role': result}
if __name__ == '__main__':
main()
|
mattwaite/CanvasStoryArchive
|
stories/migrations/0001_initial.py
|
Python
|
mit
| 1,746
| 0.001718
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
|
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entity_name', models.CharField(max_length=255)),
('entity_name_slug', models.SlugField()),
],
options={
},
bases=(models.Model,),
),
|
migrations.CreateModel(
name='EntityType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('entity_type', models.CharField(max_length=255)),
('entity_type_slug', models.SlugField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Story',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('headline', models.CharField(max_length=255)),
('byline', models.CharField(max_length=255)),
('pubdate', models.DateTimeField()),
('description', models.TextField()),
('full_text', models.TextField()),
('word_count', models.IntegerField()),
('entities', models.ManyToManyField(to='stories.Entity')),
],
options={
},
bases=(models.Model,),
),
]
|
xanthospap/chaos-ngpt
|
mipy/class-static-mths.py
|
Python
|
gpl-3.0
| 2,368
| 0.010557
|
#! /usr/bin/python
class A1(object):
def method(*argv): return argv
a = A1()
print 'Call a.method() ->', a.method()
print 'Call a.method(1,2,3) ->', a.method(1,2,3)
print '''
!! Note that the \'a\' instance is passed (implicitly) as the first
!! function parameter (something like <__main__.A1 object at 0x7f...>
'''
''' The following will throw as A1.method is a bound method (i.e. non-static)
of the A1 class:
TypeError: unbound method method() must be called with A1 instance as
first argument (got int instance instead)
'''
# print 'Call A.method(1,2,3) ->', A1.method(1,2,3)
''' A static method does not receive an implicit first argument.
It (i.e. the static method) can be called either on the class
(such as A.method()) or on an instance (such as A().method()). The
    instance is ignored except for its class.
'''
class A2(object):
@staticmethod
def method(*argv): return argv
a = A2()
print 'Call a.method() ->', a.method()
print 'Call a.method(1,2,3) ->', a.method(1,2,3)
print 'Call A.method(1,2,3) ->', A2.method(1,2,3) ## static call
print '''
!! Note that no class instance is passed to the call
'''
''' So in a normal (class) bound method call, the instance is passed implicitly
as the first argument, whereas for a static method no implicit arguments are
passed; the method is invoked via scoping the class name.
There is a third option, where we do pass an implicit first argument, but
NOT an instance; instead the argument is the class type itself. That is,
    a class method receives the class as implicit first argument, just like an
instance method receives the instance.
It can be called either on the class (such as A3.method()) or on an
instance (such as A3().method()). The instance is ignored except for its
class. If a class method is called for a derived class, the derived class
object is passed as the implied first argument.
'''
class A3(object):
@classmethod
def method(*argv): return argv
a = A3()
print 'Call a.method() ->', a.method()
print 'Call a.method(1,2,3) ->', a.method(1,2,3)
print 'Call A.method(1,2,3) ->', A3.method(1,2,3)
print '''
!! Note that the class object (i.e. something like <class '__main__.A3'>)
!! is (implicitly) passed as the first argument.
'''
|
haoyuchen1992/modular-file-renderer
|
tests/server/handlers/test_render.py
|
Python
|
apache-2.0
| 344
| 0
|
import mfr
import json
from tests import utils
from tornado import testing
class TestRenderHandler(utils.HandlerTestCase):
@testing.gen_test
def test_options_skips_prepare(self):
# Would crash b/c lack of mocks
        yield self.http_client.fetch(
self.get_url('/render'),
method='OPTIONS'
)
|
mitodl/bootcamp-ecommerce
|
applications/views.py
|
Python
|
bsd-3-clause
| 9,919
| 0.001109
|
"""Views for bootcamp applications"""
from collections import OrderedDict
import re
from django.db.models import Count, Subquery, OuterRef, IntegerField, Prefetch, Q
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.filters import OrderingFilter
from rest_framework.exceptions import MethodNotAllowed, ValidationError
from rest_framework.generics import GenericAPIView
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from rest_framework_serializer_extensions.views import SerializerExtensionsAPIViewMixin
from mitol.common.utils import now_in_utc
from applications.constants import SUBMISSION_STATUS_SUBMITTED, REVIEWABLE_APP_STATES
from applications.serializers import (
BootcampApplicationDetailSerializer,
BootcampApplicationSerializer,
SubmissionReviewSerializer,
)
from applications.api import get_or_create_bootcamp_application
from applications.filters import ApplicationStepSubmissionFilterSet
from applications.models import (
ApplicantLetter,
ApplicationStepSubmission,
BootcampApplication,
)
from cms.models import LetterTemplatePage
from ecommerce.models import Order
from klasses.models import BootcampRun
from main.permissions import UserIsOwnerPermission, UserIsOwnerOrAdminPermission
from main.utils import serializer_date_format
class BootcampApplicationViewset(
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.CreateModelMixin,
viewsets.GenericViewSet,
):
"""
View for fetching users' serialized bootcamp application(s)
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, UserIsOwnerOrAdminPermission)
owner_field = "user"
def get_queryset(self):
if self.action == "retrieve":
return BootcampApplication.objects.prefetch_state_data()
else:
return (
BootcampApplication.objects.prefetch_related(
Prefetch(
"orders", queryset=Order.objects.filter(status=Order.FULFILLED)
)
)
.filter(user=self.request.user)
.select_related("bootcamp_run__bootcamprunpage", "user")
.prefetch_related("bootcamp_run__certificates", "user__enrollments")
.order_by("-created_on")
)
def get_serializer_context(self):
added_context = {}
if self.action == "list":
added_context = {"include_page": True, "filtered_orders": True}
return {**super().get_serializer_context(), **added_context}
def get_serializer_class(self):
if self.action == "retrieve":
return BootcampApplicationDetailSerializer
elif self.action in {"list", "create"}:
return BootcampApplicationSerializer
raise MethodNotAllowed("Cannot perform the requested action.")
def create(self, request, *args, **kwargs):
bootcamp_run_id = request.data.get("bootcamp_run_id")
if not bootcamp_run_id:
raise ValidationError("Bootcamp run ID required.")
if not BootcampRun.objects.filter(id=bootcamp_run_id).exists():
return Response(
data={"error": "Bootcamp does not exist"},
status=status.HTTP_404_NOT_FOUND,
)
application, created = get_or_create_bootcamp_application(
user=request.user, bootcamp_run_id=bootcamp_run_id
)
serializer_cls = self.get_serializer_class()
return Response(
data=serializer_cls(instance=application).data,
            status=(status.HTTP_201_CREATED if created else status.HTTP_200_OK),
)
class ReviewSubmissionPagination(LimitOffsetPagination):
"""Pagination class for ReviewSubmissionViewSet"""
default_limit = 10
max_limit = 1000
facets = {}
def paginate_queryset(self, queryset, request, view=None):
"""Paginate the queryset"""
        self.facets = self.get_facets(queryset)
return super().paginate_queryset(queryset, request, view=view)
def get_paginated_response(self, data):
"""Return a paginationed response, including facets"""
return Response(
OrderedDict(
[
("count", self.count),
("next", self.get_next_link()),
("previous", self.get_previous_link()),
("results", data),
("facets", self.facets),
]
)
)
def get_facets(self, queryset):
"""Return a dictionary of facets"""
statuses = (
queryset.values("review_status")
.annotate(count=Count("review_status"))
.order_by("count")
)
qs = (
queryset.values("bootcamp_application__bootcamp_run")
.filter(bootcamp_application__bootcamp_run=OuterRef("pk"))
.order_by()
.annotate(count=Count("*"))
.values("count")
)
bootcamp_runs = (
BootcampRun.objects.values("id", "title", "start_date", "end_date")
.annotate(count=Subquery(qs, output_field=IntegerField()))
.filter(count__gte=1)
.distinct()
)
return {"review_statuses": statuses, "bootcamp_runs": bootcamp_runs}
class ReviewSubmissionViewSet(
SerializerExtensionsAPIViewMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet,
):
"""
Admin view for managing application submissions
"""
authentication_classes = (SessionAuthentication,)
serializer_class = SubmissionReviewSerializer
permission_classes = (IsAdminUser,)
queryset = (
ApplicationStepSubmission.objects.filter(
Q(submission_status=SUBMISSION_STATUS_SUBMITTED)
& Q(bootcamp_application__state__in=REVIEWABLE_APP_STATES)
& Q(bootcamp_application__bootcamp_run__end_date__gte=now_in_utc())
)
.select_related(
"bootcamp_application__user__profile",
"bootcamp_application__user__legal_address",
)
.prefetch_related("content_object")
)
filterset_class = ApplicationStepSubmissionFilterSet
filter_backends = [DjangoFilterBackend, OrderingFilter]
pagination_class = ReviewSubmissionPagination
ordering_fields = ["created_on"]
ordering = "created_on"
class UploadResumeView(GenericAPIView):
"""
View for uploading resume and linkedin URL
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated, UserIsOwnerPermission)
lookup_field = "pk"
owner_field = "user"
queryset = BootcampApplication.objects.all()
serializer_class = BootcampApplicationDetailSerializer
def post(self, request, *args, **kwargs):
"""
Update the application with resume and/or linkedin URL
"""
application = self.get_object()
linkedin_url = request.data.get("linkedin_url")
resume_file = request.FILES.get("file")
if linkedin_url is None and resume_file is None and not application.resume_file:
raise ValidationError("At least one form of resume is required.")
if linkedin_url:
self.validate_linkedin_url(linkedin_url)
application.add_resume(resume_file=resume_file, linkedin_url=linkedin_url)
# when state transition happens need to save manually
application.save()
return Response(
{
"resume_url": (
application.resume_file.url if application.resume_file else None
),
"linkedin_url": application.linkedin_url,
"resume_upload_date": serializer_date_format(
|
b3j0f/utils
|
b3j0f/utils/ut.py
|
Python
|
mit
| 5,685
| 0.000176
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Unit tests tools."""
from unittest import TestCase
from six import string_types, PY2
from .version import PY26
from re import match
__all__ = ['UTCase']
def _subset(subset, superset):
"""True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
    :return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool
"""
result = True
for k in subset:
result = k in superset and subset[k] == superset[k]
if not result:
break
    return result
class UTCase(TestCase):
"""Class which enrichs TestC
|
ase with python version compatibilities."""
def __init__(self, *args, **kwargs):
super(UTCase, self).__init__(*args, **kwargs)
if PY2: # python 3 compatibility
if PY26: # python 2.7 compatibility
def assertIs(self, first, second, msg=None):
return self.assertTrue(first is second, msg=msg)
def assertIsNot(self, first, second, msg=None):
return self.assertTrue(first is not second, msg=msg)
def assertIn(self, first, second, msg=None):
return self.assertTrue(first in second, msg=msg)
def assertNotIn(self, first, second, msg=None):
return self.assertTrue(first not in second, msg=msg)
def assertIsNone(self, expr, msg=None):
return self.assertTrue(expr is None, msg=msg)
def assertIsNotNone(self, expr, msg=None):
return self.assertFalse(expr is None, msg=msg)
def assertIsInstance(self, obj, cls, msg=None):
return self.assertTrue(isinstance(obj, cls), msg=msg)
def assertNotIsInstance(self, obj, cls, msg=None):
return self.assertTrue(not isinstance(obj, cls), msg=msg)
def assertGreater(self, first, second, msg=None):
return self.assertTrue(first > second, msg=msg)
def assertGreaterEqual(self, first, second, msg=None):
return self.assertTrue(first >= second, msg=msg)
def assertLess(self, first, second, msg=None):
self.assertTrue(first < second, msg=msg)
def assertLessEqual(self, first, second, msg=None):
return self.assertTrue(first <= second, msg=msg)
def assertRegexpMatches(self, text, regexp, msg=None):
return self.assertTrue(
match(regexp, text) if isinstance(regexp, string_types)
else regexp.search(text),
msg=msg
)
def assertNotRegexpMatches(self, text, regexp, msg=None):
return self.assertIsNone(
match(regexp, text) if isinstance(regexp, string_types)
else regexp.search(text),
msg=msg
)
def assertItemsEqual(self, actual, expected, msg=None):
return self.assertEqual(
sorted(actual), sorted(expected), msg=msg
)
def assertDictContainsSubset(self, expected, actual, msg=None):
return self.assertTrue(_subset(expected, actual), msg=msg)
def assertCountEqual(self, first, second, msg=None):
return self.assertEqual(len(first), len(second), msg=msg)
def assertRegex(self, text, regexp, msg=None):
return self.assertRegexpMatches(text, regexp, msg)
def assertNotRegex(self, text, regexp, msg=None):
return self.assertNotRegexpMatches(text, regexp, msg)
else: # python 2 compatibility
def assertRegexpMatches(self, *args, **kwargs):
return self.assertRegex(*args, **kwargs)
def assertNotRegexpMatches(self, *args, **kwargs):
return self.assertNotRegex(*args, **kwargs)
def assertItemsEqual(self, actual, expected, msg=None):
return self.assertEqual(sorted(actual), sorted(expected), msg=msg)
def assertCountEqual(self, actual, expected, msg=None):
return self.assertEqual(sorted(actual), sorted(expected), msg=msg)
def assertDictContainsSubset(self, expected, actual, msg=None):
return self.assertTrue(_subset(expected, actual), msg=msg)
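# A minimal usage sketch: subclass UTCase instead of TestCase and the
# assertion names above stay available across Python 2.6/2.7/3.
class ExampleTest(UTCase):
    def test_items(self):
        self.assertItemsEqual([3, 1, 2], [1, 2, 3])
        self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})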
|
BiuroCo/mega
|
bindings/doc/java/sphinx/source/conf.py
|
Python
|
bsd-2-clause
| 11,589
| 0.006385
|
# -*- coding: utf-8 -*-
#
# javauserguide documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 21 21:46:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'javauserguide'
copyright = u'2015, Patrick Baird'
author = u'Patrick Baird'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'V0.1'
# The full version, including alpha/beta/rc tags.
release = 'V0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'javauserguidedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'javauserguide.tex', u'javauserguide Documentation',
u'Patrick Baird', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/tcm/v20210413/tcm_client.py
|
Python
|
mit
| 3,255
| 0.002476
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.tcm.v20210413 import models
class TcmClient(AbstractClient):
_apiVersion = '2021-04-13'
_endpoint = 'tcm.tencentcloudapi.com'
_service = 'tcm'
def DescribeMesh(self, request):
"""查询网格详情
|
:param request: Request instance for DescribeMesh.
:type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshRequest`
:rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshResponse`
"""
try:
            params = request._serialize()
body = self.call("DescribeMesh", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMeshResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeMeshList(self, request):
"""查询网格列表
:param request: Request instance for DescribeMeshList.
:type request: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListRequest`
:rtype: :class:`tencentcloud.tcm.v20210413.models.DescribeMeshListResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeMeshList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeMeshListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
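# A hedged usage sketch (region and credentials are placeholders; Credential
# and the request model come from this same SDK):
#
#   from tencentcloud.common import credential
#   cred = credential.Credential("SECRET_ID", "SECRET_KEY")
#   client = TcmClient(cred, "ap-guangzhou")
#   req = models.DescribeMeshListRequest()
#   print(client.DescribeMeshList(req))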
|
cfelton/myhdl_exercises
|
support/mysigs.py
|
Python
|
mit
| 2,461
| 0.002438
|
from __future__ import division
import myhdl
from myhdl import instance, delay
ClockList = []
class Clock(myhdl.SignalType):
def __init__(self, val, frequency=1, timescale='1ns'):
self._frequency = frequency
self._period = 1/frequency
self._timescale = timescale
self._hticks = 0
self._set_hticks()
myhdl.SignalType.__init__(self, bool(val))
ClockList.append(self)
@property
def timescale(self):
return self._timescale
@timescale.setter
    def timescale(self, t):
self._timescale = t
@property
def frequency(self):
return self._frequency
    @frequency.setter
def frequency(self, f):
self._frequency = f
self._period = 1/f
self._set_hticks()
@property
def period(self):
return self._period
def _set_hticks(self):
# self._nts = self._convert_timescale(self._timescale)
# self._hticks = int(round(self._period/self._nts))
self._hticks = 5
def _convert_timescale(self, ts):
# @todo: need to complete this, ts is in the form
# "[0-9]*["ms","us","ns","ps"], parse the text
# format and retrieve a numerical value
# separate the numerical and text
nts = 1e9
return nts
def gen(self, hticks=None):
if hticks is None:
hticks = self._hticks
else:
self._hticks = hticks
# print('hticks %d'%(hticks))
@instance
def gclock():
self.next = False
while True:
yield delay(hticks)
self.next = not self.val
return gclock
class Reset(myhdl.ResetSignal):
def __init__(self, val, active, async):
myhdl.ResetSignal.__init__(self, val, active, async)
def pulse(self, delays=10):
if isinstance(delays, int):
self.next = self.active
yield delay(delays)
self.next = not self.active
elif isinstance(delays, tuple):
assert len(delays) in (1, 2, 3), "Incorrect number of delays"
self.next = not self.active if len(delays) == 3 else self.active
for dd in delays:
yield delay(dd)
self.next = not self.val
self.next = not self.active
else:
raise ValueError("{} type not supported".format(type(delays)))
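# A hedged usage sketch: Clock.gen() returns a MyHDL generator that toggles
# the signal every `hticks` simulation steps, so a minimal testbench is:
#
#   clk = Clock(0, frequency=1)
#   sim = myhdl.Simulation(clk.gen(hticks=5))
#   sim.run(50)  # the clock toggles ten times over 50 steps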
|
doirisks/dori
|
models/10.1016:S0140-6736(09)60443-8/model.py
|
Python
|
gpl-3.0
| 3,494
| 0.010876
|
"""
model.py
by Ted Morin
contains a function to predict 10-year Atrial Fibrilation risks using beta coefficients from
10.1016:S0140-6736(09)60443-8
2010 Development of a Risk Score for Atrial Fibrillation in the Community
Framingham Heart Study
translated and optimized from FHS online risk calculator's javascript
function expects parameters of:
    "Male Sex"                          bool
    "Age"                               int/float  (years)
    "BMI"                               int/float  (kg/m^2)
    "Systolic BP"                       int/float  (mm Hg)
    "Antihypertensive Medication Use"   bool
    "PR Interval"                       int/float  (sec; converted to msec in the code below)
    "Sig. Murmur"                       bool
    "Prev Heart Fail"                   bool
"""
"""
# originally part of the function, calculates xbar_value
xbar_values = np.array([
0.4464, # gender
60.9022, # age
26.2861, # bmi
136.1674, # sbp
0.2413, # hrx
16.3901, # pr_intv
0.0281, # vhd
0.0087, # hxchf
3806.9000, # age2
1654.6600, # gender_age2
1.8961, # age_vhd
0.6100 # age_hxchf
])
xbar_value = np.dot(xbar_values,betas) # this constant should be hard coded like s0!
# (and now it is)
"""
def model(ismale, age, bmi, sbp, antihyp, pr_intv, sigmurm, phf):
    # convert seconds to milliseconds as used in regression
pr_intv = pr_intv * 1000.0
# inexplicable conversion
pr_intv = pr_intv / 10.0
# this was done in the js, and the output seems much more realistic than otherwise, but it seems inexplicable!
# perhaps the coefficient shown in FHS's website is erroneous? Or uses the wrong units? It is hard to say.
import numpy as np
# betas
betas = np.array([
        1.994060, #gender
0.150520, #age
0.019300, #bmi Body Mass Index
0.00615, #sbp Systolic Blood Pressure
0.424100, #hrx Treatment for hypertension
0.070650, #pr_intv PR interval
3.795860, #vhd Significant Murmur
9.428330, #hxchf Prevalent Heart Failure
-0.000380, #age2 age squared
-0.000280, #gender_age2 male gender times age squared
-0.042380, #age_vhd age times murmur
-0.123070 #age_hxchf age times prevalent heart failure
])
s0 = 0.96337 # "const is from the spreadsheet"
xbar_value = 10.785528582
values = [ismale, age, bmi, sbp, antihyp, pr_intv, sigmurm, phf]
# calculate derived values
values.append(age*age) # age squared
values.append(ismale*age*age) # gender times age squared
values.append(sigmurm*age) # age times significant murmur
values.append(phf*age)
values = np.array(values)
# dot product
value = np.dot(values, betas)
# calculate using cox regression model
    risk = 1.0 - np.power(s0, np.exp(value - xbar_value))
# cap at .3
#if (risk > .3) : risk = .3 # is this justified by the paper?
return risk
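# In equation form this is the usual Cox proportional-hazards prediction,
#     risk = 1 - s0 ** exp(beta.x - beta.xbar),
# where s0 = 0.96337 is the 10-year baseline survival and
# beta.xbar = 10.785528582 is the linear predictor evaluated at the cohort
# means listed in the module docstring above.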
|
wbinventor/openmc
|
tests/unit_tests/test_universe.py
|
Python
|
mit
| 2,900
| 0
|
import xml.etree.ElementTree as ET
import numpy as np
import openmc
import pytest
from tests.unit_tests import assert_unbounded
def test_basic():
c1 = openmc.Cell()
c2 = openmc.Cell()
c3 = openmc.Cell()
u = openmc.Universe(name='cool', cells=(c1, c2, c3))
assert u.name == 'cool'
cells = set(u.cells.values())
assert not (cells ^ {c1, c2, c3})
# Test __repr__
repr(u)
with pytest.raises(TypeError):
u.add_cell(openmc.Material())
with pytest.raises(TypeError):
u.add_cells(c1)
u.remove_cell(c3)
cells = set(u.cells.values())
assert not (cells ^ {c1, c2})
u.clear_cells()
assert not set(u.cells)
def test_bounding_box():
cyl1 = openmc.ZCylinder(r=1.0)
cyl2 = openmc.ZCylinder(r=2.0)
c1 = openmc.Cell(region=-cyl1)
c2 = openmc.Cell(region=+cyl1 & -cyl2)
u = openmc.Universe(cells=[c1, c2])
ll, ur = u.bounding_box
assert ll == pytest.approx((-2., -2., -np.inf))
assert ur == pytest.approx((2., 2., np.inf))
u = openmc.Universe()
assert_unbounded(u)
def test_plot(run_in_tmpdir, sphere_model):
m = sphere_model.materials[0]
univ = sphere_model.geometry.root_universe
colors = {m: 'limegreen'}
for basis in ('xy', 'yz', 'xz'):
univ.plot(
basis=basis,
pixels=(10, 10),
color_by='material',
colors=colors,
)
def test_get_nuclides(uo2):
c = openmc.Cell(fill=uo2)
univ = openmc.Universe(cells=[c])
nucs = univ.get_nuclides()
assert nucs == ['U235', 'O16']
def test_cells():
cells = [openmc.Cell() for i in range(5)]
cells2 = [openmc.Cell() for i in range(3)]
cells[0].fill = openmc.Universe(cells=cells2)
u = openmc.Universe(cells=cells)
assert not (set(u.cells.values()) ^ set(cells))
all_cells = set(u.get_all_cells().values())
assert not (all_cells ^ set(cells + cells2))
def test_get_all_materials(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
    test_mats = set(univ.get_all_materials().values())
assert not (test_mats ^ set(mats))
def test_get_all_universes():
c1 = openmc.Cell()
u1 = openmc.Universe(cells=[c1])
c2 = openmc.Cell()
u2 = openmc.Universe(cells=[c2])
c3 = openmc.Cell(fill=u1)
c4 = openmc.Cell(fill=u2)
u3 = openmc.Universe(cells=[c3, c4])
    univs = set(u3.get_all_universes().values())
assert not (univs ^ {u1, u2})
def test_create_xml(cell_with_lattice):
cells = [openmc.Cell() for i in range(5)]
u = openmc.Universe(cells=cells)
geom = ET.Element('geom')
u.create_xml_subelement(geom)
cell_elems = geom.findall('cell')
assert len(cell_elems) == len(cells)
assert all(c.get('universe') == str(u.id) for c in cell_elems)
assert not (set(c.get('id') for c in cell_elems) ^
set(str(c.id) for c in cells))
|
mne-tools/mne-tools.github.io
|
stable/_downloads/3d312e85f6ffa492419d3828dd31227d/dics_source_power.py
|
Python
|
bsd-3-clause
| 3,552
| 0
|
"""
.. _ex-inverse-source-power:
==========================================
Compute source power using DICS beamformer
==========================================
Compute a Dynamic Imaging of Coherent Sources (DICS) :footcite:`GrossEtAl2001`
filter from single-trial activity to estimate source power across a frequency
band. This example demonstrates how to source localize the event-related
synchronization (ERS) of beta band activity in the
:ref:`somato dataset <somato-dataset>`.
"""
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Roman Goj <roman.goj@gmail.com>
# Denis Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
import mne
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
print(__doc__)
# %%
# Reading the raw data and creating epochs:
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Use a shorter segment of raw just for speed here
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(0, 120)  # two minutes for speed (looks similar to using all ~800 sec)
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, preload=True)
del raw
# Paths to forward operator and FreeSurfer subject directory
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects')
# %%
# We are interested in the beta band. Define a range of frequencies, using a
# log scale, from 12 to 30 Hz.
freqs = np.logspace(np.log10(12), np.log10(30), 9)
# %%
# Computing the cross-spectral density matrix for the beta frequency band, for
# different time intervals. We use a decim value of 20 to speed up the
# computation in this example at the loss of accuracy.
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)
info = epochs.info
del epochs
# %%
# To compute the source power for a frequency band, rather than each frequency
# separately, we average the CSD objects across frequencies.
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
# %%
# Computing DICS spatial filters using the CSD that was computed on the entire
# timecourse.
fwd = mne.read_forward_solution(fname_fwd)
filters = make_dics(info, fwd, csd, noise_csd=csd_baseline,
pick_ori='max-power', reduce_rank=True, real_filter=True)
del fwd
# %%
# Applying DICS spatial filters separately to the CSD computed using the
# baseline and the CSD computed during the ERS activity.
baseline_source_power, freqs = apply_dics_csd(csd_baseline, filters)
beta_source_power, freqs = apply_dics_csd(csd_ers, filters)
# %%
# Visualizing source power during ERS activity relative to the baseline power.
stc = beta_source_power / baseline_source_power
message = 'DICS source power in the 12-30 Hz frequency band'
brain = stc.plot(hemi='both', views='axial', subjects_dir=subjects_dir,
subject=subject, time_label=message)
# %%
# References
# ----------
# .. footbibliography::
|
d15123601/geotinkering
|
make_shpfile.py
|
Python
|
mit
| 1,216
| 0.005757
|
"""
This is for taking a json file and putting it into a shapefile.
It uses the generic functions written by mfoley.
"""
from geo_utils import get_data_from_geoserver
import json
import six
from fiona import collection
import pyproj
from shapely.geometry import Point, mapping
op_file = "museums.shp"
server = "mf2.dit.ie:8080"
dbase = "dit:dublin_museums"
crs_from = pyproj.Proj("+init=EPSG:4326")
crs_to = pyproj.Proj("+init=EPSG:2157")
museums = get_data_from_geoserver(server, dbase)
pts = {}
for place in museums['features']:
pts[place['properties']['name']] = (place['geometry']['coordinates'])
schema = { 'geometry': 'Point', 'properties': { 'name': 'str' } }
with collection(
op_file, "w", "ESRI Shapefile", schema) as output:
for k, v in pts.items():
x, y = pyproj.transform(crs_from, crs_to, v[0], v[1])
point = Point(x, y)
output.write({'properties': {'name': k},'geometry': mapping(point)})
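# Note: pyproj.transform() with "+init=EPSG:..." strings is deprecated in
# pyproj 2+. A hedged sketch of the modern equivalent (same EPSG codes):
#
#   from pyproj import Transformer
#   transformer = Transformer.from_crs("EPSG:4326", "EPSG:2157", always_xy=True)
#   x, y = transformer.transform(v[0], v[1])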
def make_a_shapefile(source, *dest):
if isinstance(source, dict) and source["type"] == "FeatureCollection":
print('This is a FC')
if len(source["features"]):
print('There is gj_stack')
if isinstance(source, list):
pass
|