| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
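Each row is a fill-in-the-middle (FIM) sample: `prefix`, `middle`, and `suffix` are three contiguous slices of one source file, cut at arbitrary character offsets, which is why cell boundaries in the raw dump fall mid-word. A minimal sketch of one row and its reassembly; the long string fields are abbreviated (with `...`) from the first record below, and the abbreviations, as well as the assumption that `size` is in bytes, are ours:

```python
# One row under the schema above, taken from the first record below
# (blckshrk/Weboob); long string fields are abbreviated with "..." here.
row = {
    "repo_name": "blckshrk/Weboob",
    "path": "modules/mangareader/__init__.py",
    "language": "Python",
    "license": "agpl-3.0",
    "size": 806,   # file size (assumed to be bytes)
    "score": 0.0,
    "prefix": "# -*- coding: utf-8 -*-\n# Copyright(C) 2011 ... # it unde",
    "middle": "r the terms of the GNU Affero General Public License ...",
    "suffix": "... __all__ = ['MangareaderBackend']\n",
}

# Because the slices are contiguous, concatenating them restores the
# original file content (up to the per-field length caps in the schema):
source = row["prefix"] + row["middle"] + row["suffix"]
print(source.splitlines()[0])  # -> "# -*- coding: utf-8 -*-"
```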
| blckshrk/Weboob | modules/mangareader/__init__.py | Python | agpl-3.0 | 806 | 0 |
# -*- coding: utf-8 -*-
# Copyright(C) 2011 Noé Rubinstein
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .backend import MangareaderBackend
__all__ = ['MangareaderBackend']
| cuauv/software | vision/utils/image_ordering_test.py | Python | bsd-3-clause | 877 | 0.003421 |
#!/usr/bin/env python3
import cv2
import numpy as np
from vision import camera_message_framework
import itertools
import time
shape = (500, 500, 3)
size = 1
for dim in shape:
size *= dim
def image_of(axes):
im = np.zeros(shape, dtype=np.uint8)
im[:, :, axes] = 255
return im
black = image_of([]), 'black'
red = image_of([2]), 'red'
green = image_of([1]), 'green'
blue = image_of([0]), 'blue'
yellow = image_of([2, 1]), 'yellow'
cyan = image_of([1, 0]), 'cyan'
pink = image_of([0, 2]), 'pink'
white = image_of([0, 1, 2]), 'white'
images = [black, red, green, blue, yellow, cyan, pink, white]
f = camera_message_framework.Creator('forward', size)
def main():
for im, name in itertools.cycle(images):
f.write_frame(im, int(time.time() * 1000))
print('wrote {}'.format(name))
time.sleep(1)
if __name__ == '__main__':
main()
| xiejian1985/Test | 100_Python.py | Python | gpl-3.0 | 2,354 | 0.017819 |
# coding:utf-8
def test_1():
'''
Form all three-digit numbers from the digits 1, 2, 3 and 4 with no digit repeated
'''
for a in range(1, 5):
for b in range(1, 5):
for c in range(1, 5):
if (a!=b) and (b!=c) and (a!=c):
print(a,b,c)
print('\n------------test_1 END---------------\n')
def test_2():
'''
A company's bonus is paid as a percentage of profit:
profit (I) at or below 100,000 yuan: 10% bonus
above 100,000 and below 200,000: 10% on the first 100,000 and 7.5% on the part above 100,000
between 200,000 and 400,000: 5% on the part above 200,000
between 400,000 and 600,000: 3% on the part above 400,000
between 600,000 and 1,000,000: 1.5% on the part above 600,000
above 1,000,000: 1% on the part above 1,000,000
Read this month's profit I from the keyboard and compute the total bonus due
'''
x = int(input("Enter this month's profit: "))
bonus = 0
if x <= 100000:
bonus = 0.1*x
print('Total bonus due:', bonus, 'yuan')
elif 100000<x<=200000:
bonus = 10000+0.075*(x-100000)
print('Total bonus due:', bonus, 'yuan')
elif 200000<x<=400000:
bonus = 10000+7500+0.05*(x-200000)
print('Total bonus due:', bonus, 'yuan')
elif 400000<x<=600000:
bonus = 10000+7500+10000+0.03*(x-400000)
print('Total bonus due:', bonus, 'yuan')
elif 600000<x<=1000000:
bonus = 10000+7500+10000+6000+0.015*(x-600000)
print('Total bonus due:', bonus, 'yuan')
elif x>1000000:
bonus = 10000+7500+10000+6000+6000+0.01*(x-1000000)
print('Total bonus due:', bonus, 'yuan')
print('\n------------test_2 END---------------\n')
def test_3():
'''
Read three integers x, y, z and print them from smallest to largest
'''
list1 = []
for i in range(3):
x = int(input('Enter an integer: \n'))
list1.append(x)
list1.sort()
print(list1)
def test_4():
'''
Fibonacci sequence
'''
a = 1
b = 1
for i in range(10):
a, b = b, a+b
print(a)
'''
'''
def main():
for i in range(1, 4):
func = 'test_{0}()'.format(i)
exec (func)
if __name__ == "__main__":
main()
| ldjebran/robottelo | tests/foreman/ui/test_computeprofiles.py | Python | gpl-3.0 | 2,049 | 0.003416 |
"""Test class for Compute Profile UI
:Requirement: Computeprofile
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: ComputeResources
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_string
from nailgun import entities
from robottelo.decorators import tier2, upgrade
@tier2
@upgrade
def test_positive_end_to_end(session, module_loc, module_org):
"""Perform end to end testing for compute profile component
:id: 5445fc7e-7b3f-472f-8a94-93f89aca6c22
:expectedresults: All expected CRUD actions finished successfully
:CaseLevel: Integration
:CaseImportance: High
"""
name = gen_string('alpha')
new_name = gen_string('alpha')
compute_resource = entities.LibvirtComputeResource(
location=[module_loc],
organization=[module_org],
url='qemu+ssh://root@test/system'
).create()
with session:
session.computeprofile.create({'name': name})
assert entities.ComputeProfile().search(query={'search': 'name={0}'.format(name)}), \
'Compute profile {0} expected to exist, but is not included in the search '\
'results'.format(name)
compute_resource_list = session.computeprofile.list_resources(name)
assert '{} (Libvirt)'.format(compute_resource.name) in [
resource['Compute Resource'] for resource in compute_resource_list]
session.computeprofile.rename(name, {'name': new_name})
assert entities.ComputeProfile().search(query={'search': 'name={0}'.format(new_name)}), \
'Compute profile {0} expected to exist, but is not included in the search ' \
'results'.format(new_name)
session.computeprofile.delete(new_name)
assert not entities.ComputeProfile().search(
query={'search': 'name={0}'.format(new_name)}),\
'Compute profile {0} expected to be deleted, but is included in the search ' \
'results'.format(new_name)
| Justyer/KuaikanSpider | KuaikanSpider/KuaikanSpider/items.py | Python | mit | 537 | 0.003724 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ImgItem(scrapy.Item):
image_character = scrapy.Field()
image_titles = scrapy.Field()
image_urls = scrapy.Field()
image_paths = scrapy.Field()
class ImgSingleItem(scrapy.Item):
image_character = scrapy.Field()
image_picindex = scrapy.Field()
image_title = scrapy.Field()
image_url = scrapy.Field()
image_path = scrapy.Field()
| makinacorpus/pygal | pygal/graph/bar.py | Python | lgpl-3.0 | 5,037 | 0 |
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Bar chart
"""
from __future__ import division
from pygal.graph.graph import Graph
from pygal.util import swap, ident, compute_scale, decorate
class Bar(Graph):
"""Bar graph"""
_series_margin = .06
_serie_margin = .06
def __init__(self, *args, **kwargs):
self._x_ranges = None
super(Bar, self).__init__(*args, **kwargs)
def _bar(self, parent, x, y, index, i, zero,
secondary=False, rounded=False):
width = (self.view.x(1) - self.view.x(0)) / self._len
x, y = self.view((x, y))
series_margin = width * self._series_margin
x += series_margin
width -= 2 * series_margin
width /= self._order
x += index * width
serie_margin = width * self._serie_margin
x += serie_margin
width -= 2 * serie_margin
height = self.view.y(zero) - y
r = rounded * 1 if rounded else 0
self.svg.transposable_node(
parent, 'rect',
x=x, y=y, rx=r, ry=r, width=width, height=height,
class_='rect reactive tooltip-trigger')
transpose = swap if self.horizontal else ident
return transpose((x + width / 2, y + height / 2))
def bar(self, serie_node, serie, index, rescale=False):
"""Draw a bar graph for a serie"""
bars = self.svg.node(serie_node['plot'], class_="bars")
if rescale and self.secondary_series:
points = [
(x, self._scale_diff + (y - self._scale_min_2nd) * self._scale)
for x, y in serie.points if y is not None]
else:
points = serie.points
for i, (x, y) in enumerate(points):
if None in (x, y) or (self.logarithmic and y <= 0):
continue
metadata = serie.metadata.get(i)
bar = decorate(
self.svg,
self.svg.node(bars, class_='bar'),
metadata)
val = self._format(serie.values[i])
x_center, y_center = self._bar(
bar, x, y, index, i, self.zero, secondary=rescale,
rounded=serie.rounded_bars)
self._tooltip_data(
bar, val, x_center, y_center, classes="centered")
self._static_value(serie_node, val, x_center, y_center)
def _compute(self):
if self._min:
self._box.ymin = min(self._min, self.zero)
if self._max:
self._box.ymax = max(self._max, self.zero)
x_pos = [
x / self._len for x in range(self._len + 1)
] if self._len > 1 else [0, 1] # Center if only one value
self._points(x_pos)
y_pos = compute_scale(
self._box.ymin, self._box.ymax, self.logarithmic, self.order_min
) if not self.y_labels else list(map(float, self.y_labels))
self._x_labels = self.x_labels and list(zip(self.x_labels, [
(i + .5) / self._len for i in range(self._len)]))
self._y_labels = list(zip(map(self._format, y_pos), y_pos))
def _compute_secondary(self):
if self.secondary_series:
y_pos = list(zip(*self._y_labels))[1]
ymin = self._secondary_min
ymax = self._secondary_max
min_0_ratio = (self.zero - self._box.ymin) / self._box.height or 1
max_0_ratio = (self._box.ymax - self.zero) / self._box.height or 1
if ymax > self._box.ymax:
ymin = -(ymax - self.zero) * (1 / max_0_ratio - 1)
else:
ymax = (self.zero - ymin) * (1 / min_0_ratio - 1)
left_range = abs(self._box.ymax - self._box.ymin)
right_range = abs(ymax - ymin) or 1
self._scale = left_range / right_range
self._scale_diff = self._box.ymin
self._scale_min_2nd = ymin
self._y_2nd_labels = [
(self._format(self._box.xmin + y * right_range / left_range),
y)
for y in y_pos]
def _plot(self):
for index, serie in enumerate(self.series):
self.bar(self._serie(index), serie, index)
for index, serie in enumerate(self.secondary_series, len(self.series)):
self.bar(self._serie(index), serie, index, True)
| robinson96/GRAPE | test/testGrape.py | Python | bsd-3-clause | 7,398 | 0.005812 |
#!/usr/bin/env python
import sys
import os
import inspect
import unittest
import StringIO
import shutil
import tempfile
curPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
if not curPath in sys.path:
sys.path.insert(0, curPath)
grapePath = os.path.join(curPath, "..")
if grapePath not in sys.path:
sys.path.insert(0, grapePath)
from vine import grapeGit as git
from vine import grapeConfig
from vine import grapeMenu
str1 = "str1 \n a \n b\n c\n"
str2 = "str2 \n a \n c\n c\n"
str3 = "str3 \n a \n d\n c\n"
def writeFile1(path):
with open(path, 'w') as f:
f.write(str1)
def writeFile2(path):
with open(path, 'w') as f:
f.write(str2)
def writeFile3(path):
with open(path, 'w') as f:
f.write(str3)
class TestGrape(unittest.TestCase):
def printToScreen(self, str):
self.stdout.write(str)
def switchToStdout(self):
sys.stdout = self.stdout
sys.stderr = self.stderr
def switchToHiddenOutput(self):
sys.stdout = self.output
sys.stderr = self.error
def __init__(self, superArg):
super(TestGrape, self).__init__(superArg)
self.defaultWorkingDirectory = tempfile.mkdtemp()
self.repos = [os.path.join(self.defaultWorkingDirectory, "testRepo"),
os.path.join(self.defaultWorkingDirectory, "testRepo2")]
self.repo = self.repos[0]
self._debug = False
def setUpConfig(self):
grapeMenu._resetMenu()
grapeMenu.menu()
config = grapeConfig.grapeConfig()
config.set("flow", "publicBranches", "master")
config.set("flow", "topicPrefixMappings", "?:master")
config.set("workspace", "submoduleTopicPrefixMappings", "?:master")
def setUp(self):
# setUp stdout and stderr wrapping to capture
# messages from the modules that we test
self.output = StringIO.StringIO()
self.error = StringIO.StringIO()
self.stdout = sys.stdout
self.stderr = sys.stderr
self.stdin = sys.stdin
self.cwd = os.getcwd()
sys.stdout = self.output
sys.stderr = self.error
# create a test repository to operate in.
try:
try:
os.mkdir(self.repo + "-origin")
except OSError:
pass
os.chdir(self.repo + "-origin")
cwd = os.getcwd()
git.gitcmd("init --bare", "Setup Failed")
os.chdir(os.path.join(self.repo+"-origin",".."))
git.gitcmd("clone %s %s" % (self.repo +"-origin",self.repo), "could not clone test bare repo")
os.chdir(self.repo)
fname = os.path.join(self.repo, "testRepoFile")
writeFile1(fname)
self.file1 = fname
git.gitcmd("add %s" % fname, "Add Failed")
git.gitcmd("commit -m \"initial commit\"", "Commit Failed")
git.gitcmd("push origin master", "push to master failed")
# create a develop branch in addition to master by default
git.branch("develop")
git.push("origin develop")
os.chdir(os.path.join(self.repo, ".."))
except git.GrapeGitError:
pass
self.menu = grapeMenu.menu()
if self._debug:
self.switchToStdout()
def tearDown(self):
def onError(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``, primarily for Windows.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=onerror)``
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise Exception
if self._debug:
self.switchToHiddenOutput()
os.chdir(os.path.abspath(os.path.join(self.defaultWorkingDirectory,"..")))
shutil.rmtree(self.defaultWorkingDirectory, False, onError)
# restore stdout, stdin, and stderr to their original streams
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
os.chdir(self.cwd)
self.output.close()
# reset grapeConfig and grapeMenu
grapeConfig.resetGrapeConfig()
grapeMenu._resetMenu()
# print the captured standard out
def printOutput(self):
for l in self.output:
self.stdout.write(l)
# print the captured standard error
def printError(self):
for l in self.error:
self.stderr.write(l)
# stage user input for methods that expect it
def queueUserInput(self, inputList):
self.input = StringIO.StringIO()
sys.stdin = self.input
self.input.writelines(inputList)
self.input.seek(0)
def assertTrue(self, expr, msg=None):
if msg is not None:
msg += "\n%s" % self.output.getvalue()
super(TestGrape, self).assertTrue(expr, msg=msg)
def assertFalse(self, expr, msg=None):
if msg is not None:
msg += "\n%s" % self.output.getvalue()
super(TestGrape, self).assertFalse(expr, msg=msg)
def buildSuite(cls, appendTo=None):
suite = appendTo
if suite is None:
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(cls))
return suite
def main(argv, debug=False):
import testBranches
import testClone
import testConfig
import testMergeDevelop
import testGrapeGit
import testReview
import testVersion
import testPublish
import testCO
import testNestedSubproject
import testStatus
import testUpdateLocal
import testUtility
testClasses = {"Branches":testBranches.TestBranches,
"Clone":testClone.TestClone,
"Config":testConfig.TestConfig,
"GrapeGit":testGrapeGit.TestGrapeGit,
"MergeDevelop":testMergeDevelop.TestMD,
"Review":testReview.TestReview,
"Version":testVersion.TestVersion,
"Publish":testPublish.TestPublish,
"CO":testCO.TestCheckout,
"NestedSubproject":testNestedSubproject.TestNestedSubproject,
"Status":testStatus.createStatusTester(),
"GrapeUp":testUpdateLocal.createUpTester(),
"Utility":testUtility.TestUtility }
suite = unittest.TestSuite()
if len(argv) == 0:
for cls in testClasses.values():
suite = buildSuite(cls, suite)
else:
if argv[0] == "listSuites":
print testClasses.keys()
exit(0)
for cls in [testClasses[arg] for arg in argv]:
suite = buildSuite(cls, suite)
if debug:
for cls in suite:
for case in cls:
print case
case._debug = True
suite._tests
result = unittest.TextTestRunner(verbosity=2).run(suite)
return result.wasSuccessful()
if __name__ == "__main__":
main(sys.argv[1:], debug=False)
| chrisvire/aeneas | aeneas/ttswrappers/basettswrapper.py | Python | agpl-3.0 | 34,887 | 0.00192 |
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.ttswrappers.basettswrapper.TTSCache`,
a TTS cache;
* :class:`~aeneas.ttswrappers.basettswrapper.BaseTTSWrapper`,
an abstract wrapper for a TTS engine.
"""
from __future__ import absolute_import
from __future__ import print_function
import io
import subprocess
from aeneas.audiofile import AudioFile
from aeneas.audiofile import AudioFileUnsupportedFormatError
from aeneas.exacttiming import TimeValue
from aeneas.logger import Loggable
from aeneas.runtimeconfiguration import RuntimeConfiguration
import aeneas.globalfunctions as gf
class TTSCache(Loggable):
"""
A TTS cache, that is,
a dictionary whose keys are pairs
``(fragment_language, fragment_text)``
and whose values are pairs
``(file_handler, file_path)``.
An item in the cache means that the text of the key
has been synthesized to the file
located at the path of the corresponding value.
Note that it is not enough to store
the string of the text as the key,
since the same text might be pronounced in a different language.
Also note that the values also store the file handler,
since we might want to close it explicitly
before removing the file from disk.
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
"""
TAG = u"TTSCache"
def __init__(self, rconf=None, logger=None):
super(TTSCache, self).__init__(rconf=rconf, logger=logger)
self._initialize_cache()
def _initialize_cache(self):
self.cache = dict()
self.log(u"Cache initialized")
def __len__(self):
return len(self.cache)
def keys(self):
"""
Return the sorted list of keys currently in the cache.
:rtype: list of tuples ``(language, text)``
"""
return sorted(list(self.cache.keys()))
def is_cached(self, fragment_info):
"""
Return ``True`` if the given ``(language, text)`` key
is present in the cache, or ``False`` otherwise.
:rtype: bool
"""
return fragment_info in self.cache
def add(self, fragment_info, file_info):
"""
Add the given ``(key, value)`` pair to the cache.
:param fragment_info: the text key
:type fragment_info: tuple of str ``(language, text)``
:param file_info: the path value
:type file_info: tuple ``(handler, path)``
:raises: ValueError if the key is already present in the cache
"""
if self.is_cached(fragment_info):
raise ValueError(u"Attempt to add text already cached")
self.cache[fragment_info] = file_info
def get(self, fragment_info):
"""
Get the value associated with the given key.
:param fragment_info: the text key
:type fragment_info: tuple of str ``(language, text)``
:raises: KeyError if the key is not present in the cache
"""
if not self.is_cached(fragment_info):
raise KeyError(u"Attempt to get text not cached")
return self.cache[fragment_info]
def clear(self):
"""
Clear the cache and remove all the files from disk.
"""
self.log(u"Clearing cache...")
for file_handler, file_info in self.cache.values():
self.log([u" Removing file '%s'", file_info])
gf.delete_file(file_handler, file_info)
self._initialize_cache()
self.log(u"Clearing cache... done")
class BaseTTSWrapper(Loggable):
"""
An abstract wrapper for a TTS engine.
It calls the TTS executable or library, passing parameters
like the text string and languages, and it produces
a WAVE file on disk and a list of time anchors.
In case of multiple text fragments, the resulting WAVE files
will be joined together in a single WAVE file.
The TTS parameters, their order, and the switches
can be configured in the concrete subclass
for a specific TTS engine.
For example, it might perform one or more calls like ::
$ echo "text" | tts -v voice_code -w output_file.wav
or
$ tts -eval "(voice_code)" -i text_file.txt -o output_file.wav
The call methods will be attempted in the following order:
1. direct Python call
2. Python C extension
3. TTS executable via ``subprocess``
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
:raises: NotImplementedError: if none of the call methods is available
"""
CLI_PARAMETER_TEXT_PATH = "TEXT_PATH"
"""
Placeholder to specify the path to the UTF-8 encoded file
containing the text to be synthesized,
to be read by the TTS engine.
"""
CLI_PARAMETER_TEXT_STDIN = "TEXT_STDIN"
"""
Placeholder to specify that the TTS engine
reads the text to be synthesized from stdin.
"""
CLI_PARAMETER_VOICE_CODE_FUNCTION = "VOICE_CODE_FUNCTION"
"""
Placeholder to specify a list of arguments
for the TTS engine to select the TTS voice
to be used for synthesizing the text.
"""
CLI_PARAMETER_VOICE_CODE_STRING = "VOICE_CODE_STRING"
"""
Placeholder for the voice code string.
"""
CLI_PARAMETER_WAVE_PATH = "WAVE_PATH"
"""
Placeholder to specify the path to the audio file
to be synthesized by the TTS engine.
"""
CLI_PARAMETER_WAVE_STDOUT = "WAVE_STDOUT"
"""
Placeholder to specify that the TTS engine
outputs the audio data to stdout.
"""
LANGUAGE_TO_VOICE_CODE = {}
"""
Map a language code to a voice code.
Concrete subclasses must populate this class field,
according to the language and voice codes
supported by the TTS engine they wrap.
"""
CODE_TO_HUMAN = {}
"""
Map from voice code to human-readable name.
"""
CODE_TO_HUMAN_LIST = []
"""
List of all language codes with their human-readable names.
"""
OUTPUT_AUDIO_FORMAT = None
"""
A tuple ``(codec, channels, rate)``
specifying the format
of the audio file generated by the TTS engine,
for example ``("pcm_s16le", 1, 22050)``.
If unknown, set it to ``None``:
in this case, the audio file will be converted
to PCM16 mono WAVE (RIFF) as needed.
"""
DEFAULT_LANGUAGE = None
"""
The default language for this TTS engine.
Concrete subclasses must populate this class field,
according to the languages supported
by the TTS engine they wrap.
"""
DEFAULT_TTS_PATH = None
"""
The default path for this TTS engine,
when called via ``subprocess``,
otherwise set it to ``None``.
"""
HAS_SUBPROCESS_CALL = False
"""
If ``True``, the TTS wrapper can invoke the TTS engine
via ``subprocess``.
"""
HAS_C_EXTENSION_CALL = False
"""
If ``True``, the TTS wrapper can
| gas1121/JapanCinemaStatusSpider | scrapyproject/pipelines.py | Python | mit | 6,140 | 0.000489 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapyproject.models import (Cinema, Showing, ShowingBooking, Movie,
db_connect, drop_table_if_exist,
create_table, Session)
from scrapyproject.items import (CinemaItem, ShowingItem, ShowingBookingItem,
MovieItem)
from scrapyproject.utils import (use_cinema_database,
use_showing_database,
use_movie_database)
class DataBasePipeline(object):
"""
pipeline to add item to database
will keep exist data if spider has attribute 'keep_old_data'
"""
def __init__(self, database):
self.database = database
# keep crawled movie to sum cinema count
self.crawled_movies = {}
@classmethod
def from_crawler(cls, crawler):
return cls(database=crawler.settings.get('DATABASE'))
def open_spider(self, spider):
engine = db_connect()
if not spider.keep_old_data:
# drop data
if use_showing_database(spider):
drop_table_if_exist(engine, ShowingBooking)
drop_table_if_exist(engine, Showing)
elif use_cinema_database(spider):
drop_table_if_exist(engine, Cinema)
elif use_movie_database(spider):
drop_table_if_exist(engine, Movie)
create_table(engine)
def close_spider(self, spider):
for title in self.crawled_movies:
self.process_movie_item(self.crawled_movies[title], spider)
# close global session when spider ends
Session.remove()
def process_item(self, item, spider):
"""
use cinema table if spider has attribute "use_cinema_database"
use showing table if spider has attribute "use_showing_database"
a spider should not have both attributes
"""
if isinstance(item, CinemaItem):
return self.process_cinema_item(item, spider)
elif isinstance(item, ShowingItem):
return self.process_showing_item(item, spider)
elif isinstance(item, ShowingBookingItem):
return self.process_showing_booking_item(item, spider)
elif isinstance(item, MovieItem):
# sum cinema count for each cinema
if item['title'] not in self.crawled_movies:
self.crawled_movies[item['title']] = item
else:
count = (item['current_cinema_count'] +
self.crawled_movies[item['title']]['current_cinema_count'])
self.crawled_movies[item['title']]['current_cinema_count'] = count
return item
def process_cinema_item(self, item, spider):
cinema = Cinema(**item)
exist_cinema = Cinema.get_cinema_if_exist(cinema)
if not exist_cinema:
# if data do not exist in database, add it
self.add_item_to_database(cinema)
else:
# otherwise check if it should be merged to exist record
# merge strategy:
# - if exist data is crawled from other source, only add names
# and screens to exist data;
# - if cinema do not have site url, item is treated as duplicate
# and dropped;
# - otherwise, merge all data
if cinema.source != exist_cinema.source:
# replace when new cinema data crawled more screens
if cinema.screen_count > exist_cinema.screen_count:
exist_cinema.merge(
cinema, merge_method=Cinema.MergeMethod.replace)
else:
exist_cinema.merge(
cinema, merge_method=Cinema.MergeMethod.info_only)
self.add_item_to_database(exist_cinema)
elif cinema.site:
exist_cinema.merge(
cinema, merge_method=Cinema.MergeMethod.update_count)
self.add_item_to_database(exist_cinema)
return item
def process_showing_item(self, item, spider):
showing = Showing(**item)
# if data do not exist in database, add it
if not Showing.get_showing_if_exist(showing):
self.add_item_to_database(showing)
return item
def process_showing_booking_item(self, item, spider):
showing_booking = ShowingBooking()
showing_booking.from_item(item)
# if showing exists use its id in database
exist_showing = Showing.get_showing_if_exist(showing_booking.showing)
if exist_showing:
old_showing = showing_booking.showing
showing_booking.showing = exist_showing
showing_booking.showing.title = old_showing.title
showing_booking.showing.title_en = old_showing.title_en
showing_booking.showing.start_time = old_showing.start_time
showing_booking.showing.end_time = old_showing.end_time
showing_booking.showing.cinema_name = old_showing.cinema_name
showing_booking.showing.cinema_site = old_showing.cinema_site
showing_booking.showing.screen = old_showing.screen
showing_booking.showing.seat_type = old_showing.seat_type
showing_booking.showing.total_seat_count = \
old_showing.total_seat_count
showing_booking.showing.source = old_showing.source
# then add self
self.add_item_to_database(showing_booking)
return item
def process_movie_item(self, item, spider):
movie = Movie(**item)
# if data do not exist in database, add it
if not Movie.get_movie_if_exist(movie):
self.add_item_to_database(movie)
return item
def add_item_to_database(self, db_item):
try:
db_item = Session.merge(db_item)
Session.commit()
except:
Session.rollback()
raise
| skosukhin/spack | var/spack/repos/builtin/packages/sickle/package.py | Python | lgpl-2.1 | 1,879 | 0.000532 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Sickle(MakefilePackage):
"""Sickle is a tool that uses
|
sliding windows along with quality and
length thresholds to determine when quality is sufficiently low to trim
the 3'-end of reads and also determines when the quality is
sufficiently high enough to trim the 5'-end of reads."""
homepage = "https://github.com/najoshi/sickle"
url = "https://github.com/najoshi/sickle/archive/v1.33.tar.gz"
version('1.33', '9e2ba812183e1515198c9e15c4cd2cd7')
depends_on('zlib')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('sickle', prefix.bin)
| sephalon/python-ivi | ivi/agilent/agilent8590A.py | Python | mit | 1,520 | 0.001316 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590A import *
class agilent8590A(agilentBase8590A):
"Agilent 8590A IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8590A')
super(agilent8590A, self).__init__(*args, **kwargs)
self._input_impedance = 50
self._frequency_low = 10e3
self._frequency_high = 1.5e9
| frogeyedpeas/ChalupaCity | working_pa1/auto_grader.py | Python | mit | 7,382 | 0.013411 |
#! /usr/bin/python
import os, sys, glob, time, subprocess, signal
import popen2
subdirectories = ['first', 'second', 'third', 'fourth', 'fifth']
formats = {'first':'line', 'second':'line', 'third':'file', 'fourth':'file', 'fifth':'file'}# if a program has single liner input and output, we put all test cases in single file. Otherwise, we have a file for test and associated file with results
class ExperimentError(Exception):
def __init__(self, command, output):
self.command = command
limit = 10000
if(len(output) > limit):
self.output = output[:limit/2] + "\n\n...TRUNCATED...\n\n" + output[-limit/2:]
else:
self.output = output
def __str__(self):
return "ExperimentError:" + `self.command`
def run_command(command_string, input_string="", max_lines=0, verbose=False, echo=True, throw_exception=True, return_valgrind_output=False, user_program=False):
if echo:
print "executing:", command_string
obj = popen2.Popen4(command_string)
output = ""
valgrind_output = ""
obj.tochild.write(input_string)
obj.tochild.close()
valgrind_prefix = "==%d==" % obj.pid
maxSleep = 20
if user_program: #program may have an infinite loop
while maxSleep>0:
time.sleep(0.25)
maxSleep-=1
if obj.poll()!=-1:
break
if maxSleep==0 and obj.poll()==-1:
os.kill(obj.pid, signal.SIGKILL)
print command_string, " taking longer than expected. Killed."
return ""
line = obj.fromchild.readline()
while (line):
if verbose == 1:
print line,
if line.startswith(valgrind_prefix):
valgrind_output += line
output += line
line = obj.fromchild.readline()
exit_status = obj.wait()
if(max_lines != 0):
lines = output.split("\n");
output = string.join(lines[-max_lines:], "\n")
if throw_exception and exit_status != 0:
raise ExperimentError(command_string, output)
if return_valgrind_output:
return valgrind_output
else:
return output
def compare_string_file(ref_file, test_string, show_difference=False):
test_list=test_string.split("\n")
fd = open(ref_file)
i=0
flag=True
for line in fd:
if i<len(test_list):
if line.strip()!=test_list[i].strip():
flag=False
elif len(line.strip())>0:
print "Output missing: ", line
flag=False
i+=1
fd.close()
while(i<len(test_list)):
if len(test_list[i].strip())==0:
i+=1
continue
print "Extra output: ", test_list[i]
i+=1
flag=False
return flag
def compare_string(ref, test):
ref = ref.strip()
test = test.strip()
if(ref==test):
return True
if(ref==test.lower()):
print "%s and %s are in different case. Please print your output in correct case."%(ref, test)
return False
def make_executable(dirname):
if os.path.isfile('Makefile') or os.path.isfile('makefile'):
run_command("make clean", verbose=False)
run_command("make", verbose=True)
else:
print "No Makefile found in", dirname
print "Please submit a Makefile to receive full grade."
run_command("gcc -o %s *.c *.h"%(dirname), verbose=False)
def file_grade(dirname):
print "Grading", dirname
prevdir = os.getcwd()
os.chdir(dirname)
make_executable(dirname)
if not os.path.isfile(dirname):
print "Executable %s missing. Please check the compilation output."%(dirname)
return
for testfile in sorted(os.listdir(".")):
if os.path.isdir(testfile) or not testfile.startswith("test"):
continue
resultfile = "result"+testfile[4:len(testfile)]
if not os.path.isfile(resultfile):
print "Found a test file %s. But, no associated result file."%(testfile)
continue
print "Found a test file %s. The output will be compared to %s."%(testfile, resultfile)
ret = run_command("./%s %s"%(dirname, testfile), user_program=True)
if compare_string_file(resultfile, ret, show_difference=True):
print "The output is correct for input file %s."%(testfile)
else:
print "The output is not correct for input file %s."%(testfile)
print ""
print ""
os.chdir(prevdir)
def single_grade(dirname):
print "Grading", dirname
prevdir = os.getcwd()
os.chdir(dirname)
make_executable(dirname)
if not os.path.isfile(dirname):
print "Executable %s missing. Please check the compilation output."%(dirname)
return
if not os.path.isfile("test.txt"):
print "Expecting the test cases in test.txt. Not found."
return
else:
print "Using test.txt for grading."
fd = open("test.txt")
state = 0
for line in fd:
if state==0:
inputline = line
state = 1
else:
outputline = line
state = 0
ret = run_command("./%s %s"%(dirname, inputline.strip()), user_program=True)
print "Your program generated %s. The correct answer is %s."%(ret.strip(), outputline.strip())
if compare_string(outputline, ret):
print "The output is correct for input %s."%(inputline.strip())
else:
print "The output is not correct for input %s."%(inputline.strip())
fd.close()
print ""
os.chdir(prevdir)
def global_grade(dirname):
target = len(subdirectories)
for subdir in subdirectories:
if not os.path.isdir(os.path.join(subdir)):
continue
print subdir, " found!"
if subdir in formats and formats[subdir]=='line':
single_grade(subdir)
elif subdir in formats and formats[subdir]=='file':
file_grade(subdir)
if __name__ == '__main__':
basepath = "pa1"
tarmode = False #by default check the directory
if len(sys.argv)>1:
if sys.argv[1].strip()=='tar':
tarmode=True
if tarmode==False:
if not os.path.isdir(basepath):
print "pa1 is not presnt in this directory."
sys.exit(1)
else:
print "Grading the content of pa1."
os.chdir(basepath)
global_grade(basepath)
else:
prevdir = os.getcwd()
if not os.path.exists("pa1.tar"):
print "Expecting pa1.tar in current directory. Current directory is %s"%(prevdir)
print "Please make sure you created pa1.tar in the right directory"
sys.exit(1)
if os.path.exists("obj_temp"):
print "Deleting the directory obj_temp."
run_command("rm -rf obj_temp", verbose=False)
run_command("mkdir obj_temp", verbose=False)
os.chdir("obj_temp")
run_command("tar -xvf ../pa1.tar")
if os.path.isdir("pa1"):
os.chdir("pa1")
global_grade("pa1")
else:
print "There is not directory named pa1 in pa1.tar."
print "Please check your tar file."
os.chdir(prevdir)
| kingvuplus/boom | lib/python/Screens/RdsDisplay.py | Python | gpl-2.0 | 10,176 | 0.001671 |
from enigma import iPlayableService, iRdsDecoder
from Screens.Screen import Screen
from Components.ActionMap import NumberActionMap
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN
from Tools.LoadPixmap import LoadPixmap
class RdsInfoDisplaySummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent=parent)
self['message'] = StaticText('')
self.parent.onText.append(self.onText)
def onText(self, message):
self['message'].text = message
if message and len(message):
self.show()
else:
self.hide()
class RdsInfoDisplay(Screen):
ALLOW_SUSPEND = True
def __init__(self, session):
Screen.__init__(self, session)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap={iPlayableService.evEnd: self.__serviceStopped,
iPlayableService.evUpdatedRadioText: self.RadioTextChanged,
iPlayableService.evUpdatedRtpText: self.RtpTextChanged,
iPlayableService.evUpdatedRassInteractivePicMask: self.RassInteractivePicMaskChanged})
self['RadioText'] = Label()
self['RtpText'] = Label()
self['RassLogo'] = Pixmap()
self.onLayoutFinish.append(self.hideWidgets)
self.rassInteractivePossible = False
self.onRassInteractivePossibilityChanged = []
self.onText = []
def createSummary(self):
return RdsInfoDisplaySummary
def hideWidgets(self):
for x in (self['RadioText'], self['RtpText'], self['RassLogo']):
x.hide()
for x in self.onText:
x('')
def RadioTextChanged(self):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
rdsText = decoder and decoder.getText(iRdsDecoder.RadioText)
if rdsText and len(rdsText):
self['RadioText'].setText(rdsText)
self['RadioText'].show()
else:
self['RadioText'].hide()
for x in self.onText:
x(rdsText)
def RtpTextChanged(self):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
rtpText = decoder and decoder.getText(iRdsDecoder.RtpText)
if rtpText and len(rtpText):
self['RtpText'].setText(rtpText)
self['RtpText'].show()
else:
self['RtpText'].hide()
for x in self.onText:
x(rtpText)
def RassInteractivePicMaskChanged(self):
if not self.rassInteractivePossible:
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
mask = decoder and decoder.getRassInteractiveMask()
if mask[0] & 1:
self['RassLogo'].show()
self.rassInteractivePossible = True
for x in self.onRassInteractivePossibilityChanged:
x(True)
def __serviceStopped(self):
self.hideWidgets()
if self.rassInteractivePossible:
self.rassInteractivePossible = False
for x in self.onRassInteractivePossibilityChanged:
x(False)
class RassInteractive(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self['actions'] = NumberActionMap(['NumberActions', 'RassInteractiveActions'], {'exit': self.close,
'0': lambda x: self.numPressed(0),
'1': lambda x: self.numPressed(1),
'2': lambda x: self.numPressed(2),
'3': lambda x: self.numPressed(3),
'4': lambda x: self.numPressed(4),
'5': lambda x: self.numPressed(5),
'6': lambda x: self.numPressed(6),
'7': lambda x: self.numPressed(7),
'8': lambda x: self.numPressed(8),
'9': lambda x: self.numPressed(9),
'nextPage': self.nextPage,
'prevPage': self.prevPage,
'nextSubPage': self.nextSubPage,
'prevSubPage': self.prevSubPage})
self.__event_tracker = ServiceEventTracker(screen=self, eventmap={iPlayableService.evUpdatedRassInteractivePicMask: self.recvRassInteractivePicMaskChanged})
self['subpages_1'] = Pixmap()
self['subpages_2'] = Pixmap()
self['subpages_3'] = Pixmap()
self['subpages_4'] = Pixmap()
self['subpages_5'] = Pixmap()
self['subpages_6'] = Pixmap()
self['subpages_7'] = Pixmap()
self['subpages_8'] = Pixmap()
self['subpages_9'] = Pixmap()
self['Marker'] = Label('>')
self.subpage = {1: self['subpages_1'],
2: self['subpages_2'],
3: self['subpages_3'],
4: self['subpages_4'],
5: self['subpages_5'],
6: self['subpages_6'],
7: self['subpages_7'],
8: self['subpages_8'],
9: self['subpages_9']}
self.subpage_png = {1: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/rass_page1.png')),
2: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/rass_page2.png')),
3: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/rass_page3.png')),
4: LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, 'icons/rass_page4.png'))}
self.current_page = 0
self.current_subpage = 0
self.showRassPage(0, 0)
self.onLayoutFinish.append(self.updateSubPagePixmaps)
def updateSubPagePixmaps(self):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if not decoder:
print 'NO RDS DECODER in showRassPage'
else:
mask = decoder.getRassInteractiveMask()
page = 1
while page < 10:
subpage_cnt = self.countAvailSubpages(page, mask)
subpage = self.subpage[page]
if subpage_cnt > 0:
if subpage.instance:
png = self.subpage_png[subpage_cnt]
if png:
subpage.instance.setPixmap(png)
subpage.show()
else:
print 'rass png missing'
else:
subpage.hide()
page += 1
def recvRassInteractivePicMaskChanged(self):
self.updateSubPagePixmaps()
def showRassPage(self, page, subpage):
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if not decoder:
print 'NO RDS DECODER in showRassPage'
else:
decoder.showRassInteractivePic(page, subpage)
page_diff = page - self.current_page
self.current_page = page
if page_diff:
current_pos = self['Marker'].getPosition()
y = current_pos[1]
y += page_diff * 25
self['Marker'].setPosition(current_pos[0], y)
def getMaskForPage(self, page, masks = None):
if not masks:
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if not decoder:
print 'NO RDS DECODER in getMaskForPage'
masks = decoder.getRassInteractiveMask()
if masks:
mask = masks[page * 4 / 8]
if page % 2:
mask >>= 4
else:
mask &=
|
15
return mask
def countAvailSubpages(self, page, masks):
mask = self.getMaskForPage(page, masks)
cnt = 0
while mask:
if mask & 1:
cnt += 1
mask >>= 1
return cnt
def nextPage(self):
mask = 0
page = self.current_page
while mask == 0:
| 4Quant/tensorflow | tensorflow/python/framework/importer.py | Python | apache-2.0 | 13,843 | 0.008596 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A utility function for importing TensorFlow graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
# TODO(josh11b): SWIG the code from node_def_util instead of duplicating
# the logic here.
def _GetNodeAttr(node_def, attr_name):
if attr_name not in node_def.attr:
raise ValueError('Expected one attr with name %r in %s.'
% (attr_name, str(node_def)))
return node_def.attr[attr_name]
def _ArgToTypesNoRef(node_def, arg_def):
if arg_def.number_attr:
repeats = _GetNodeAttr(node_def, arg_def.number_attr).i
if arg_def.type_attr:
dtype = _GetNodeAttr(node_def, arg_def.type_attr).type
else:
assert arg_def.type != types_pb2.DT_INVALID
dtype = arg_def.type
return [dtype] * repeats
elif arg_def.type_attr:
return [_GetNodeAttr(node_def, arg_def.type_attr).type]
elif arg_def.type_list_attr:
return _GetNodeAttr(node_def, arg_def.type_list_attr).list.type
else:
assert arg_def.type != types_pb2.DT_INVALID
return [arg_def.type]
def _SingleArgToTypes(node_def, arg_def):
types = _ArgToTypesNoRef(node_def, arg_def)
if arg_def.is_ref:
return [dtypes.as_dtype(dt).as_ref.as_datatype_enum for dt in types]
return types
def _ArgsToTypes(node_def, arg_list):
types = []
for arg_def in arg_list:
types.extend(_SingleArgToTypes(node_def, arg_def))
return types
def _InputTypes(node_def, op_dict):
op_def = op_dict[node_def.op]
return _ArgsToTypes(node_def, op_def.input_arg)
def _OutputTypes(node_def, op_dict):
op_def = op_dict[node_def.op]
return _ArgsToTypes(node_def, op_def.output_arg)
def _IsControlInput(input_name):
# Expected format: '^operation_name' (control input).
return input_name.startswith('^')
def _ParseTensorName(tensor_name):
"""Parses a tensor name into an operation name and output index.
This function will canonicalize tensor names as follows:
* "foo:0" -> ("foo", 0)
* "foo:7" -> ("foo", 7)
* "foo" -> ("foo", 0)
* "foo:bar:baz" -> ValueError
Args:
tensor_name: The name of a tensor.
Returns:
A tuple containing the operation name, and the output index.
Raises:
ValueError: If `tensor_name' cannot be interpreted as the name of a tensor.
"""
components = tensor_name.split(':')
if len(components) == 2:
# Expected format: 'operation_name:output_index'.
try:
output_index = int(components[1])
except ValueError:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
return components[0], output_index
elif len(components) == 1:
# Expected format: 'operation_name' (implicit 0th output).
return components[0], 0
else:
raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
def _CanonicalInputName(input_name):
input_name = compat.as_str(input_name)
if _IsControlInput(input_name):
return input_name
input_op_name, output_index = _ParseTensorName(input_name)
return '%s:%d' % (input_op_name, output_index)
def _InvalidNodeMessage(node, message):
return 'graph_def is invalid at node %r: %s.' % (node.name, message)
@contextlib.contextmanager
def _MaybeDevice(device):
"""Applies the given device only if device is not None or empty."""
if device:
with ops.device(device):
yield
else:
yield
def import_graph_def(graph_def, input_map=None, return_elements=None,
name=None, op_dict=None):
"""Imports the TensorFlow graph in `graph_def` into the Python `Graph`.
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
[`Tensor`](#Tensor) and [`Operation`](#Operation) objects. See
[`Graph.as_graph_def()`](#Graph.as_graph_def) for a way to create a
`GraphDef` proto.
Args:
graph_def: A `GraphDef` proto containing operations to be imported into
the default graph.
input_map: A dictionary mapping input names (as strings) in `graph_def`
to `Tensor` objects. The values of the named input tensors in the
imported graph will be re-mapped to the respective `Tensor` values.
return_elements: A list of strings containing operation names in
`graph_def` that will be returned as `Operation` objects; and/or
tensor names in `graph_def` that will be returned as `Tensor` objects.
name: (Optional.) A prefix that will be prepended to the names in
`graph_def`. Defaults to `"import"`.
op_dict: (Optional.) A dictionary mapping op type names to `OpDef` protos.
Must contain an `OpDef` proto for each op type named in `graph_def`.
If omitted, uses the `OpDef` protos registered in the global registry.
Returns:
A list of `Operation` and/or `Tensor` objects from the imported graph,
corresponding to the names in `return_elements`.
Raises:
TypeError: If `graph_def` is not a `GraphDef` proto,
`input_map` is not a dictionary mapping strings to `Tensor` objects,
or `return_elements` is not a list of strings.
ValueError: If `input_map`, or `return_elements` contains names that
do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
it refers to an unknown tensor).
"""
# Type checks for inputs.
if not isinstance(graph_def, graph_pb2.GraphDef):
# `graph_def` could be a dynamically-created message, so try a duck-typed
# approach
try:
old_graph_def = graph_def
graph_def = graph_pb2.GraphDef()
graph_def.MergeFrom(old_graph_def)
except TypeError:
raise TypeError('graph_def must be a GraphDef proto.')
if input_map is None:
input_map = {}
else:
if not (isinstance(input_map, dict)
and all(isinstance(k, compat.bytes_or_text_types)
for k in input_map.keys())):
raise TypeError('input_map must be a dictionary mapping strings to '
'Tensor objects.')
if return_elements is not None:
return_elements = tuple(return_elements)
if not all(isinstance(x, compat.bytes_or_text_types)
for x in return_elements):
raise TypeError('return_elements must be a list of strings.')
# Use a canonical representation for all tensor names.
input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}
used_input_keys = set()
name_to_op = {}
if op_dict is None:
op_dict = op_def_registry.get_registered_ops()
with ops.op_scope(input_map.values(), name, 'import'):
g = ops.get_default_graph()
g.graph_def_versions.CopyFrom(graph_def.versions)
with ops.name_scope('_inputs'):
input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}
# NOTE(mrry): We do this in two passes, because there may be a cycle in
# `graph_def`.
# 1. Add operations without their inputs.
for node in graph_def.node:
# Set any default attr values that aren't present.
op_def = op_dict[node.op]
for attr_def in op_def.attr:
key = attr
| nirajkvinit/python3-study | 30days/day13/templates.py | Python | mit | 691 | 0.023155 |
import os
def get_template_path(path):
file_path = os.path.join(os.getcwd(), path)
if not os.path.isfile(file_path):
raise Exception("This is not a valid template path %s"%(file_path))
return file_path
def get_template(path):
file_path = get_template_path(path)
return open(file_path).read()
def render_context(template_string, context):
return template_string.format(**context)
file_ = 'templates/email_message.txt'
file_html = 'templates/email_message.html'
template = get_template(file_)
template_html = get_template(file_html)
context = {
"name": "Niraj",
"date": None,
"total": None
}
print(render_context(template, context))
print(render_context(template_html, context))
| docwalter/py3status | py3status/modules/xsel.py | Python | bsd-3-clause | 1,642 | 0.000609 |
# -*- coding: utf-8 -*-
"""
Display X selection.
Configuration parameters:
cache_timeout: refresh interval for this module (default 0.5)
command: the clipboard command to run (default 'xsel -o')
format: display format for this module (default '{selection}')
max_size: strip the selection to this value (default 15)
symmetric: show beginning and end of the selection string
with respect to configured max_size. (default True)
Format placeholders:
{selection} output from clipboard command
Requires:
xsel: a command-line program to retrieve/set the X selection
@author Sublim3 umbsublime@gamil.com
@license BSD
SAMPLE OUTPUT
{'full_text': 'selected text'}
example
{'full_text': 'rrrrr > wtf is a warlock doing in here'}
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 0.5
command = 'xsel -o'
format = '{selection}'
max_size = 15
symmetric = True
def xsel(self):
selection = self.py3.command_output(self.command)
if len(selection) >= self.max_size:
if self.symmetric is True:
split = int(self.max_size / 2) - 1
selection = selection[:split] + '..' + selection[-split:]
else:
selection = selection[:self.max_size]
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'selection': selection})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| marcelogomess/glpi_api | glpi_api/api.py | Python | bsd-2-clause | 6,202 | 0.004353 |
# -*- coding: utf-8 -*-
from requests import post, get, put, delete
class Api:
def __init__(self, base_url, user_token, app_token):
self.base_url = base_url
self.app_token = app_token
self.user_token = user_token
def initSession(self):
target_url = 'initSession/'
sessiondata = {'Content-Type': 'application/json',
'Authorization': 'user_token ' + self.user_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
self.session_token = session.json()
self.session_token = self.session_token['session_token']
def killSession(self):
target_url = 'killSession'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.content)
def getMyProfiles(self):
target_url = 'getMyProfiles/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def getActiveProfile(self):
target_url = 'getActiveProfile/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def changeActiveProfile(self, profile_id):
target_url = 'getActiveProfile/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = post(self.base_url + target_url, headers=sessiondata, data=str(profile_id))
return (session.json())
def getMyEntities(self):
target_url = 'getMyEntities/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def getActiveEntities(self):
target_url = 'getActiveEntities/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def getFullSession(self):
target_url = 'getFullSession/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def getItem(self, item_type, item_id, args=None):
if args is None:
target_url = item_type + '/' + '/' + item_id
else:
target_url = item_type + '/' + '/' + item_id + '?' + args
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def getAllItems(self, item_type, args=None):
if args is None:
target_url = item_type + '/'
else:
target_url = item_type + '/' + '?' + args
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def getSubItems(self, item_type, item_id, sub_item_type, args=None):
if args is None:
target_url = item_type + '/' + item_id + '/' + sub_item_type
else:
target_url = item_type + '/' + item_id + '/' + sub_item_type + '?' + args
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
    def getMultipleItems(self, items_dict):
        # Assumes GLPI's items[] convention, i.e. each entry carries an
        # 'itemtype' and an 'items_id' key (an assumption; the original
        # body ignored its argument and queried one hard-coded ticket).
        items_list = []
        for item in items_dict['items']:
            items_list.append(self.getItem(item_type=item['itemtype'],
                                           item_id=str(item['items_id'])))
        return (items_list)
    def listSearchItemsOptions(self, item_type, args=None):
if args is None:
target_url = 'listSearchOptions/' + item_type + '/'
else:
target_url = 'listSearchOptions/' + item_type + '/' + '?' + args
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = get(self.base_url + target_url, headers=sessiondata)
return (session.json())
def searchItems(self):
return(True)
def addItems(self, item_type, item_data):
target_url = item_type+'/'
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = post(self.base_url + target_url, headers=sessiondata, json=item_data)
return (session.json())
def updateItems(self,item_type, item_id, item_data):
target_url = item_type+'/'+str(item_id)
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = put(self.base_url + target_url, headers=sessiondata, json=item_data)
return (session.json())
def deleteItems(self,item_type, item_id):
target_url = item_type+'/'+str(item_id)
sessiondata = {'Content-Type': 'application/json',
'Session-Token': self.session_token, 'App-Token': self.app_token}
session = delete(self.base_url + target_url, headers=sessiondata)
return(session.json())
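# --- Editor's note: a hedged usage sketch of the Api wrapper above; the
# URL and tokens are placeholders and the call order simply mirrors the
# methods defined in this class.
if __name__ == '__main__':
    api = Api(base_url='https://glpi.example.com/apirest.php/',
              user_token='USER_TOKEN', app_token='APP_TOKEN')
    api.initSession()                                # obtains session_token
    print(api.getMyProfiles())                       # profiles for this session
    print(api.getItem('Ticket', '1', args='expand_dropdowns=true'))
    api.killSession()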
|
braincorp/robustus
|
robustus/tests/test_bullet.py
|
Python
|
mit
| 775
| 0.00129
|
# =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# Licensed under MIT license (see LICENSE file)
# =============================================================================
import pytest
from robustus.detail import perform_standard_test
def test_bullet_installation(tmpdir):
tmpdir.chdir()
bullet_versions = ['bc2']
for ver in bullet_versions:
bullet_files = ['lib/bullet-%s/lib/libBulletCollision.a' % ver,
'lib/bullet-%s/lib/libBulletDynamics.a' % ver,
'lib/bullet-%s/lib/libLinearMath.a' % ver]
perform_standard_test('bullet==%s' % ver, [], bullet_files)
if __name__ == '__main__':
pytest.main('-s %s -n0' % __file__)
|
jumpserver/jumpserver
|
apps/jumpserver/settings/__init__.py
|
Python
|
gpl-3.0
| 153
| 0
|
# -*- coding: utf-8 -*-
#
from .base import *
from .logging import *
from .libs import *
from .auth import *
from .custom import *
from ._xpack import *
|
LaMi-/pmatic
|
pmatic/api.py
|
Python
|
gpl-2.0
| 31,401
| 0.006019
|
#!/usr/bin/env python
# encoding: utf-8
#
# pmatic - Python API for Homematic. Easy to use.
# Copyright (C) 2016 Lars Michelsen <lm@larsmichelsen.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Provides the API interface to the CCU
This module provides you with the low level API of pmatic to the CCU.
Low level API means that it cares about connecting to the interfaces on
the CCU, authenticates with it and accesses the API calls and makes them
all available in the Python code. So that you can simply call methods on
the API object to make API calls and get Python data structures back.
The most important function of this module is the init() function. This
is the function you should call in your program code to initialize the
API object. It detects whether or not the code is run on the CCU or
a remote connection from another system is made to the CCU.
"""
# Add Python 3.x behaviour to 2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import sys
import json
import time
import atexit
import threading
# Specific for the LocalAPI()
import subprocess
# Specific for the RemoteAPI()
try:
from urllib.request import urlopen, Request
from urllib.error import URLError
from http.client import BadStatusLine
except ImportError:
from urllib2 import urlopen, Request
from urllib2 import URLError
from httplib import BadStatusLine
from pmatic.exceptions import PMException, PMConnectionError
import pmatic.utils as utils
def init(mode=None, **kwargs):
"""Wrapper to create the API object you need to acces the CCU API.
By default it detects whether or not this code is being executed on the CCU
or on another system. And initializes either a LocalAPI() object when run
directly on a CCU or, in all other cases, a RemoteAPI() object. This object
is then being returned.
You can provide the mode argument to disable auto detection and either set
it to "local" to enforce a LocalAPI() object to be created or "remote" to
enforce a RemoteAPI() object.
In case a RemoteAPI() object is being created, you need to provide at least
the additional keyword arguments address="http://[HOST]" which needs to
contain the base URL to your CCU together with credentials=("[USER]", "PASSWORD")
which must be valid credentials to authenticate with the CCU.
"""
if mode is None:
mode = utils.is_ccu() and "local" or "remote"
if mode == "local":
if not utils.is_ccu():
raise PMException("local mode can only be used on the CCU.")
return LocalAPI()
elif mode == "remote":
try:
return RemoteAPI(**kwargs)
except TypeError as e:
raise PMException("You need to provide at least the address and credentials "
"to access your CCU (%s)." % e)
else:
raise PMException("Invalid mode given. Valid ones are \"local\" and \"remote\".")
class AbstractAPI(utils.LogMixin):
"""An abstract implementation of the pmatic low level API.
This is the base class for all specific API classes, which are currently
LocalAPI() and RemoteAPI().
"""
_constructed = False
@classmethod
    def _replace_wrong_encoded_json(cls, text):
return text.replace("\\{", "{")\
.replace("\\[", "[")\
.replace("\\/", "/")
def __init__(self):
super(AbstractAPI, self).__init__()
self._methods = {}
self._fail_exc = None
self._initialized = False
# For simplicity we only allow one thread to perform API calls at the time
self._api_lock = threading.RLock()
# is called in locked context
def _register_atexit_handler(self):
"""Can be called to register a cleanup handler on interpreter exit.
        The APIs can register this to ensure the close() method is called
        on interpreter shutdown."""
atexit.register(self.close)
# is called in locked context
def _parse_api_response(self, method_name_int, kwargs, body):
# FIXME: The ccu is performing wrong encoding at least for output of
# executed rega scripts. But maybe this is a generic problem. Let's see
# and only fix the known issues for the moment.
if method_name_int in [ "rega_run_script", "interface_get_paramset_description",
"room_get_all" ]:
body = AbstractAPI._replace_wrong_encoded_json(body)
try:
msg = json.loads(body)
except Exception as e:
raise PMException("Failed to parse response to %s (%s):\n%s\n" %
(method_name_int, e, body))
if msg["error"] is not None:
if msg["error"]["code"] == 501 and not self._call('rega_is_present'):
raise PMConnectionError("The logic layer (ReGa) is not available (yet). When "
"the CCU has just been started, please wait some time "
"and retry.")
else:
raise PMException("[%s] %s: %s (Code: %s, Request: %r)" % (method_name_int,
msg["error"]["name"],
msg["error"]["message"],
msg["error"]["code"],
kwargs))
return msg["result"]
# is called from unlocked context
def __del__(self):
"""When object is removed, the close() method is called."""
if self._constructed:
self.close()
# is called from unlocked context
def __getattr__(self, method_name_int):
"""Realizes dynamic methods based on the methods supported by the API.
The method names are nearly the same as provided by the CCU
(see http://[CCU_ADDRESS]/api/homematic.cgi or API.print_methods()).
        The method names are slightly renamed. For example CCU.getSerial() is
available as API.ccu_get_serial() in pmatic. The translation is made
by the _to_internal_name() method. For details take a look at that
function.
"""
with self._api_lock:
self._initialize()
def lowlevel_call(*args, **kwargs):
if args:
raise PMException("You need to specify your arguments as named arguments. "
"For example api.sys_var_get_value_by_name(name=\"...\").")
return self._call(method_name_int, **kwargs)
return lowlevel_call
# is called in locked context
def _initialize(self):
if self.initialized:
return
self._fail_exc = None
self.logger.debug("[API] Initializing...")
try:
self._initialize_api()
self._initialized = True
self.logger.debug("[API] Initialized")
except Exception as e:
self._initialized = False
self._fail_exc = e
raise
def _to_internal_name(self, method_name_api):
"""Translates a raw API method name to the pmatic notation.
These modifications are made:
* . are replaced with _
* BidCoS is replaced with bidcos
* ReGa is replaced with rega
* whole string is tra
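# --- Editor's note: the docstring above is cut off in this excerpt; a
# plausible sketch of the name translation it describes, assuming the
# listed rules (dots to underscores, BidCoS/ReGa lowercased, CamelCase
# split on inner capitals). Not the actual pmatic implementation.
import re

def to_internal_name(method_name_api):
    name = method_name_api.replace('.', '_')
    name = name.replace('BidCoS', 'bidcos').replace('ReGa', 'rega')
    # Insert '_' before an inner capital, then lowercase the whole string.
    name = re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', name)
    return name.lower()

# to_internal_name('CCU.getSerial') -> 'ccu_get_serial'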
|
0xalen/opencaster_isdb-tb
|
libs/dvbobjects/dvbobjects/MPEG/DVBH_Descriptors.py
|
Python
|
gpl-2.0
| 10,359
| 0.029443
|
#! /usr/bin/env python
#
# Copyright (C) 2004 Andreas Berger, berger@ftw.at
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import string
from dvbobjects.utils import *
from dvbobjects.MPEG.Descriptor import Descriptor
class ip_mac_platform_name_descriptor(Descriptor):
descriptor_tag = 0x0c
def bytes(self):
fmt = "!3s%ds" % len(self.text_char_bytes)
return pack(fmt,
self.ISO_639_language_code,
self.text_char_bytes
)
class ip_mac_platform_provider_name_descriptor(Descriptor):
descriptor_tag = 0x0d
def bytes(self):
fmt = "!3s%ds" % len(self.text_char_bytes)
return pack(fmt,
                    self.ISO_639_language_code,
self.text_char_bytes
)
class target_serial_number_descriptor(Descriptor):
descriptor_tag = 0x08
def bytes(self):
fmt = "!%ds" % len(serial_data_bytes)
return pack(fmt,
self.serial_data_bytes
)
class target_smartcard_descriptor(Descriptor):
descriptor_tag = 0x06
def bytes(self):
fmt = "!I%ds" % len(self.private_data_bytes)
return pack(fmt,
self.super_CA_system_id,
self.private_data_bytes
)
class target_MAC_address_descriptor(Descriptor):
descriptor_tag = 0x07
def bytes(self):
fmt = "!6s%ds" % len(self.mac_addr_bytes)
return pack(fmt,
self.mac_addr_mask,
self.mac_addr_bytes
)
class target_MAC_address_range_descriptor(Descriptor):
descriptor_tag = 0x0e
def bytes(self):
fmt = "!6s%ds" % len(self.mac_addr_bytes)
return pack(fmt,
self.mac_addr_mask,
self.mac_addr_bytes
)
class target_IP_address_descriptor(Descriptor):
descriptor_tag = 0x09
def bytes(self):
fmt = "!I%ds" % len(self.IPv4_addr_bytes)
return pack(fmt,
self.IPv4_addr_mask,
self.IPv4_addr_bytes
)
class target_IP_slash_descriptor(Descriptor):
descriptor_tag = 0x0f
def bytes(self):
fmt = "!4BB"
return pack(fmt,
self.IPv4_addr[0],
self.IPv4_addr[1],
self.IPv4_addr[2],
self.IPv4_addr[3],
self.IPv4_slash_mask
)
class target_IP_source_slash_descriptor(Descriptor):
descriptor_tag = 0x10
def bytes(self):
fmt = "!%ds" % len(self.IPv4_source_dest_bytes)
return pack(fmt,
self.IPv4_source_dest_bytes
)
class target_IPv6_address_descriptor(Descriptor):
descriptor_tag = 0x0a
def bytes(self):
fmt = "!7s%ds" % len(self.IPv6_address_bytes)
return pack(fmt,
self.IPv6_address_mask,
self.IPv6_address_bytes
)
class target_IPv6_slash_descriptor(Descriptor):
descriptor_tag = 0x11
def bytes(self):
fmt = "!%ds" % len(self.IPv6_bytes)
return pack(fmt,
self.IPv6_bytes
)
class target_IPv6_source_slash_descriptor(Descriptor):
descriptor_tag = 0x12
def bytes(self):
fmt = "!%ds" % len(self.IPv6_source_dest_bytes)
return pack(fmt,
self.IPv6_source_dest_bytes
)
class ip_mac_stream_location_descriptor(Descriptor):
descriptor_tag = 0x13
def bytes(self):
fmt = "!HHHHB"
return pack(fmt,
self.network_id,
self.original_network_id,
self.transport_stream_id,
self.service_id,
self.component_tag
)
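# --- Editor's note: to make the pack() pattern above concrete, this is
# what ip_mac_stream_location_descriptor's payload looks like for example
# field values (the numbers are illustrative only).
from struct import pack as _pack
# "!HHHHB" = network byte order, four 16-bit IDs plus one 8-bit tag.
_payload = _pack("!HHHHB", 0x1234, 0x5678, 0x0001, 0x0002, 0x07)
# _payload == b'\x124Vx\x00\x01\x00\x02\x07' (9 bytes)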
class isp_access_mode_descriptor(Descriptor):
descriptor_tag = 0x14
def bytes(self):
fmt = "!B"
return pack(fmt,
self.access_mode
)
class telephone_descriptor(Descriptor):
descriptor_tag = 0x57
def bytes(self):
fmt = "!BBBB%ds%ds%ds%ds%ds" % (len(country_prefix_bytes), len(international_area_code_bytes), len(operator_code_bytes), len(national_area_code_bytes), len(core_number_bytes))
return pack(fmt,
(0x03 << 7) & 0xC0 | (self.foreign_availability << 5) & 0x20 | self.connection_type & 0x1F,
(0x01 << 7) & 0x80 | (self.country_prefix_length << 5) & 0x60 | (self.international_area_code << 4) & 0x1C | self.operator_code_length & 0x07,
(0x01 << 7) & 0x80 | (self.national_area_code_length << 4) & 0x70 | self.core_number_length & 0x0F,
country_prefix_bytes,
international_area_code_bytes,
operator_code_bytes,
national_area_code_bytes,
core_number_bytes
)
class private_data_specifier_descriptor(Descriptor):
descriptor_tag = 0x5f
def bytes(self):
fmt = "!I"
return pack(fmt,
self.private_data_specifier
)
class time_slice_fec_identifier_descriptor(Descriptor):
descriptor_tag = 0x77
def bytes(self):
time_slice_fec_id = 0x00;
fmt = "!BBB"
return pack(fmt,
(self.time_slicing << 7) & 0x80 | (self.mpe_fec << 5) & 0x60 | (0x03 << 3) & 0x18 | self.frame_size & 0x07,
self.max_burst_duration,
(self.max_average_rate << 4) & 0xF0 | time_slice_fec_id & 0x0F,
)
# FIXME: move this class to another file, it's no descriptor
class platform_id_data2(DVBobject):
def pack(self):
fmt = "!BBBBB"
return pack(fmt,
(self.platform_id >> 16) & 0xFF,
(self.platform_id >> 8) & 0xFF,
self.platform_id & 0xFF,
self.action_type & 0xFF,
(0x03 << 6) & 0xC0 | (0x00 << 5) & 0x20 | 0x01 & 0x1F
)
# FIXME: move this class to another file, it's no descriptor
class ip_mac_notification_info(DVBobject):
def pack(self):
# pack platform id data loop
pid_bytes = string.join(
map(lambda x: x.pack(),
self.platform_id_data_loop),
"")
platform_id_data_length = len(pid_bytes);
fmt = "!B%ds%ds" % (platform_id_data_length, len(self.private_data_bytes))
return pack(fmt,
platform_id_data_length,
pid_bytes,
self.private_data_bytes
)
# FIXME: move this class to another file, it's no descriptor
class platform_name(DVBobject):
def pack(self):
platform_name_length = len(self.text_char_bytes)
fmt = "!3sB%ds" % platform_name_length
return pack(fmt,
self.ISO_639_language_code,
platform_name_length,
self.text_char_bytes
)
# FIXME: move this class to another file, it's no descriptor
class platform_id_data(DVBobject):
def pack(self):
pn_bytes = string.join(
map(lambda x: x.pack(),
self.platform_name_loop),
|
joeyac/JudgeServer
|
client/languages.py
|
Python
|
mit
| 2,209
| 0.002263
|
# coding=utf-8
from __future__ import unicode_literals
c_lang_config = {
"name": "c",
"compile": {
"group_memory": True,
"src_name": "main.c",
"exe_name": "main",
"max_cpu_time": 5.0,
"max_real_time": 10.0,
"max_memory": 512 * 1024, # 512M compile memory
"compile_command": "/usr/bin/gcc -DONLINE_JUDGE -O2 -w -fmax-errors=3 -std=c99 {src_path} -lm -o {exe_path}",
},
"run": {
"exe_name": "main",
"max_cpu_time": 1.0,
"max_real_time": 5.0,
"max_memory": 10 * 1024, # 10M compile memory
"command": "{exe_path}",
}
}
cpp_lang_config = {
"name": "c++",
"compile": {
"group_memory": True,
"src_name": "main.cpp",
"exe_name":
|
"main",
"max_cpu_time": 5.0,
"max_real_time": 10.0,
"max_memory": 512 * 1024, # 512M compile memory
"compile_command": "/usr/bin/g++ -DONLINE_JUDGE -O2 -w -fmax-errors=3 -st
|
d=c++11 {src_path} -lm -o {exe_path}",
},
"run": {
"exe_name": "main",
"max_cpu_time": 1.0,
"max_real_time": 5.0,
"max_memory": 10 * 1024, # 10M compile memory
"command": "{exe_path}",
}
}
java_lang_config = {
"name": "java",
"compile": {
"group_memory": True,
"src_name": "Main.java",
"exe_name": "Main",
"max_cpu_time": 3.0,
"max_real_time": 5.0,
"max_memory": -1,
"compile_command": "/usr/bin/javac {src_path} -d {exe_name} -encoding UTF8"
},
"run": {
"group_memory": True,
"exe_name": "Main",
"max_cpu_time": 1.0,
"max_real_time": 5.0,
"max_memory": 10 * 1024, # 10M compile memory
"command": "/usr/bin/java -cp {exe_name} Main",
}
}
py2_lang_config = {
"name": "python2",
"compile": {
"src_name": "solution.py",
"exe_name": "solution.pyc",
"max_cpu_time": 3000,
"max_real_time": 5000,
"max_memory": 128 * 1024 ,
"compile_command": "/usr/bin/python -m py_compile {src_path}",
},
"run": {
"exe_name": "solution.pyc",
"command": "/usr/bin/python {exe_path}",
}
}
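# --- Editor's note: a small sketch of how a judge could expand the
# command templates above; str.format with named placeholders is assumed,
# since that is what the {src_path}/{exe_path} markers suggest.
if __name__ == '__main__':
    cfg = c_lang_config['compile']
    cmd = cfg['compile_command'].format(src_path='/judge/main.c',
                                        exe_path='/judge/main')
    print(cmd)
    # /usr/bin/gcc -DONLINE_JUDGE -O2 -w -fmax-errors=3 -std=c99 /judge/main.c -lm -o /judge/main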
|
JeGoi/IPa2
|
packages/java_properties.py
|
Python
|
mit
| 6,870
| 0.006259
|
#!/usr/bin/env python
"""
Title : Java program file
Author : JG
Date : dec 2016
Objet : script to create Propertie File Program
in : get infos from yml
out : print infos in properties file
"""
import sys,os
import yaml
import util as u
from random import randint
# ===============================================
# FUNCTION create Java File Properties
# in : get infos from csv
# out : print infos in java file
# ===============================================
def create_properties_file(yml,armaDir):
progDir = u.define_prop_path(armaDir)
filename = progDir+""+u.get_program_name(yml)+".properties"
out = open(filename, 'w')
out.write("#Armadillo Workflow Platform 1.1 (c) Etienne Lord, Mickael Leclercq, Alix Boc, Abdoulaye Banire Diallo, Vladimir Makarenkov"+
"\n#"+yml['author']+
"\n#"+yml['date']+
"\n#Pgrogram info"+
"\nName= "+yml['Program']['name']+
"\nClassName= programs."+u.get_program_name(yml)+""+
"\nEditorClassName= editors."+u.get_program_name(yml)+"Editors"+
"\ndebug= false"+
"\nfilename= C\:\\armadillo2\\data\\properties\\"+u.get_program_name(yml)+".properties")
for paths in yml['Program']['executablePaths']:
out.write("\n"+paths+"="+yml['Program']['executablePaths'][paths])
out.write("\nHelpSupplementary=")
if yml['Program']['helpSupplementary']:
out.write(yml['Program']['helpSupplementary'])
out.write("\nPublication= ")
if yml['Program']['publication']:
out.write(yml['Program']['publication'])
out.write("\nDescription= ")
if yml['Program']['desc']:
out.write(yml['Program']['desc'])
ObjectID = randint(1000000000,9999999999)
out.write("\nObjectID="+u.get_program_name(yml)+"_"+str(ObjectID)+""+
"\nObjectType=Program"+
"\nNoThread=false")
out.write("\nType=")
if yml['Program']['menu']:
out.write(yml['Program']['menu'])
out.write("\nNormalExitValue=")
if yml['Program']['exitValue'] or yml['Program']['exitValue'] == 0:
out.write(str(yml['Program']['exitValue']))
out.write("\nVerifyExitValue=")
if yml['Program']['exitValue']:
out.write('true')
else:
out.write('false')
out.write("\nWebServices=")
if yml['Program']['webServices']:
out.write(yml['Program']['webServices'])
out.write("\nWebsite=")
    if yml['Program']['website']:
out.write(yml['Program']['website'])
# Color options
color = u.get_color(yml)
out.write("\ncolorMode = "+color+""+
"\ndefaultColor = "+color+"")
# Inputs types
out.write("\n#INP
|
UTS TYPES")
if len(yml['Inputs']) > 0:
o = ""
s = ""
for op in yml['Inputs']:
if op['type']:
out.write("\nInput"+op['type']+"=Connector"+str(op['connector']))
if op['OneConnectorOnlyFor']:
if o == "":
o = str(op['OneConnectorOnlyFor'])
else:
t = str(op['OneConnectorOnlyFor'])
if t not in o:
o = o+","+t
if op['SolelyConnectors']:
if s == "":
s = str(op['SolelyConnectors'])
else:
t = str(op['SolelyConnectors'])
if t not in o:
s = s+","+t
# Inputs options
if o != "" or s != "":
out.write("\n#INPUTS OPTIONS")
if o != "":
out.write("\nOneConnectorOnlyFor="+o)
if s != "":
out.write("\nSolelyConnectors= "+s)
else:
out.write("\nNO IMPUTS ??\n")
# Inputs Names
out.write("\n#INPUTS Connector text")
tab = ('2','3','4')
for t in tab:
c = ""
if len(yml['Inputs']) > 0:
for op in yml['Inputs']:
o = str(op['connector'])
if t in o or "true" in o:
if c == "":
c = str(op['connectorText'])
else:
s = str(op['connectorText'])
if s not in c:
c = c+", "+s
if c != "":
out.write("\nConnector"+t+"= "+c)
# Number of inputs
out.write("\nnbInput= ")
if yml['Program']['numImputs']:
out.write(str(yml['Program']['numImputs']))
# Outputs values
out.write("\n#OUTPUTS OPTIONS"+
"\nConnector0Output=True"+
"\nOutputResults=Connector0"+
"\nOutputOutputText=Connector0")
if len(yml['Outputs']) > 0:
for op in yml['Outputs']:
if op['type']:
out.write("\nOutput"+op['type']+"=Connector0")
# Default Values
out.write("\n#DEFAULT VALUES"+
"\ndefaultPgrmValues=")
for Panel in yml['Menus']:
pNameS = u.name_without_space(Panel['name'])
if 'Panel' not in Panel:
# Means default option
out.write(""+pNameS+"<>true<>")
else:
for Tab in Panel['Panel']:
if 'Arguments' in Tab:
tName = Tab['tab']
for Arguments in Tab['Arguments']:
cName = Arguments['name']
if 'values' in Arguments and \
Arguments['values'] is not None and \
Arguments['values']['vType'] is not None:
vType = Arguments['values']['vType']
v = u.create_value_name(pNameS,tName,cName,vType)
vDef = str(Arguments['values']['vDefault'])
out.write(v+"<>"+vDef+"<>")
out.write("\n#Cluster")
if 'Cluster' in yml and yml['Cluster'] is not None:
if 'ClusterProgramName' in yml['Cluster']:
out.write("\nClusterProgramName="+yml['Cluster']['ClusterProgramName'])
if 'ExecutableCluster' in yml['Cluster']:
out.write("\nExecutableCluster="+yml['Cluster']['ExecutableCluster'])
if 'version' in yml['Program']:
out.write("\nVersion= "+u.get_program_version(yml)+"")
out.write("\n#Docker")
if 'Docker' in yml and yml['Docker'] is not None:
if 'DockerImage' in yml['Docker']:
out.write("\nDockerImage="+yml['Docker']['DockerImage'])
if 'ExecutableDocker' in yml['Docker']:
out.write("\nExecutableDocker="+yml['Docker']['ExecutableDocker'])
if 'DockerInputs' in yml['Docker']:
out.write("\nDockerInputs="+yml['Docker']['DockerInputs'])
if 'DockerOutputs' in yml['Docker']:
out.write("\nDockerOutputs="+yml['Docker']['DockerOutputs'])
|
russomi/ferris3-tutorial
|
app/jobposts/models.py
|
Python
|
apache-2.0
| 294
| 0
|
__author__ = 'russomi'
from google.appengine.ext import ndb
from ..employers.models import Employer
from ferris3 import Model
class JobPost(Model):
employer = ndb.KeyProperty(kind=Employer)
    title = ndb.StringProperty(required=True)
description = ndb.TextProperty(required=True)
|
jteehan/cfme_tests
|
utils/tests/test_metadoc.py
|
Python
|
gpl-2.0
| 364
| 0
|
import pytest
pytestmark = pytest.mark.meta(from_pytest='yep')
@pytest.mark.meta(from_decorator='seems to be')
def test_metadoc(meta):
"""This test function has a docstring!
Metadata:
valid_yaml: True
"""
assert meta['from_docs']['valid_yaml']
assert meta['from_pytest'] == 'yep'
assert meta['from_decorator'] == 'seems to be'
|
imito/odin
|
odin/preprocessing/text.py
|
Python
|
mit
| 24,225
| 0.010485
|
# -*- coding: utf-8 -*-
# ===========================================================================
# Popular encoding:
# utf-8
# ISO-8859-1
# ascii
# encode: string -> string of bytes
# decode: string of bytes -> string
# ===========================================================================
from __future__ import print_function, division, absolute_import
import timeit
import string
from collections import OrderedDict, Iterator, Iterable, defaultdict, Mapping
from abc import abstractmethod, ABCMeta
from six import add_metaclass, string_types
import numpy as np
from odin.utils import as_tuple, Progbar, is_string, is_number
from odin.stats import freqcount
from multiprocessing import Pool, cpu_count
from odin.preprocessing.signal import pad_sequences
_nlp = {}
_stopword_list = []
# ===========================================================================
# Helper
# ===========================================================================
def language(lang='en'):
"""Support language: 'en', 'de' """
import spacy
lang = lang.lower()
if lang not in ('en', 'de'):
raise ValueError('We only support languages: en-English, de-German.')
    if lang not in _nlp:
        # e.g. "en_core_web_md"; load once and cache. (The original
        # guarded the load with `if lang == 'en':`, which left 'de'
        # unloaded and crashing on the return below.)
        _nlp[lang] = spacy.load(lang)
    return _nlp[lang]
def add_stopword(words):
words = as_tuple(words, t=string_types)
for w in words:
_stopword_list.append(w)
def is_stopword(word, lang='en'):
nlp = language(lang)
# ====== check self-defined dictionary ====== #
if word in _stopword_list:
return True
# ====== check in spacy dictionary ====== #
if word not in nlp.vocab.strings:
return False
lexeme = nlp.vocab[nlp.vocab.strings[word]]
return lexeme.is_stop
def is_oov(word, lang='en'):
""" Check if a word is out of dictionary """
nlp = language(lang)
if word not in nlp.vocab.strings:
return True
return False
# ===========================================================================
# Text preprocessor
# ===========================================================================
@add_metaclass(ABCMeta)
class TextPreprocessor(object):
""" A Preprocessor takes a string and return a preprocessed string
a list of strings which represented a list of tokens.
"""
@abstractmethod
def preprocess(self, text):
pass
def __call__(self, text):
if isinstance(text, (tuple, list)):
            return [self.preprocess(t) for t in text]
else:
return self.preprocess(text)
class CasePreprocessor(TextPreprocessor):
"""Remove case, name, and split the text"""
def __init__(self, lower, keep_name=True, split=' '):
super(CasePreprocessor, self).__init__()
self.lower = bool(lower)
self.split = split
self.keep_name = keep_name
def preprocess(self, text):
if self.split is not None:
text = text.split(' ')
if self.lower:
text = [t if self.keep_name and t.isupper()
else t.lower()
for t in text if len(t) > 0]
elif self.lower:
text = text.lower()
return text
class TransPreprocessor(TextPreprocessor):
""" Substitute a set of character to a new characters """
def __init__(self, old='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
new=' '):
super(TransPreprocessor, self).__init__()
self.__str_trans = string.maketrans(old, new * len(old))
new = None if len(new) == 0 else unicode(new)
old = unicode(old)
self.__uni_trans = dict((ord(char), new) for char in old)
def preprocess(self, text):
if isinstance(text, (tuple, list)):
text = ' '.join(text)
# ====== translate the text ====== #
if isinstance(text, unicode):
text = text.translate(self.__uni_trans)
else:
text = text.translate(self.__str_trans)
return text.strip()
# ===========================================================================
# Simple Token filter
# ===========================================================================
@add_metaclass(ABCMeta)
class TokenFilter(object):
""" A Filter return a "length > 0" token if the token is accepted
and '' token otherwise.
This allows not only token filtering but also token transforming.
"""
@abstractmethod
    def filter(self, token, pos):
pass
def __call__(self, token, pos):
return self.filter(token, pos)
class TYPEfilter(TokenFilter):
""" Simplest form of filter
is_alpha: alphabetic characters only (i.e. no space and digit)
is_digit: consists of digits only
is_ascii: all character must be ASCII
is_title: the first letter if upper-case
is_punct: punctuation
* if any value True, the token with given type is accepted
    Parameters
    ----------
keep_oov: bool
if True keep all the "out-of-vocabulary" tokens, else ignore them
"""
def __init__(self, is_alpha=False, is_digit=False, is_ascii=False,
is_title=False):
super(TYPEfilter, self).__init__()
self.is_alpha = is_alpha
self.is_digit = is_digit
self.is_ascii = is_ascii
self.is_title = is_title
def filter(self, token, pos):
flags = [
self.is_alpha and token.isalpha(),
self.is_digit and token.isdigit(),
self.is_title and token.istitle(),
self.is_ascii and all(ord(c) < 128 for c in token)
]
if any(flags):
return token
return ''
class POSfilter(TokenFilter):
""" we use universal tag-of-speech for filtering token
NOUN : noun
PRON : pronoun (I, me, he, she, herself, you, it, that, etc.)
PROPN: proper noun (name of individual person, place, ... spelled with an initial capital letter, e.g. Jane, London)
ADJ : adjective
VERB : verb
ADV : adverb
ADP : adposition (prepositional, postpositional, and circumpositional phrases)
AUX : auxiliary
DET : determiner
INTJ : interjection
NUM : numeral
PART : particle
PUNCT: punctuation
    SCONJ: subordinating conjunction
SYM : symbol
X : other
"""
def __init__(self, NOUN=True, PRON=False, PROPN=True,
ADJ=True, VERB=False, ADV=True,
               ADP=False, AUX=False, DET=False, INTJ=False,
NUM=False, PART=False, PUNCT=False,
SCONJ=False, SYM=False, X=False):
super(POSfilter, self).__init__()
pos = []
if NOUN: pos.append('NOUN')
if PRON: pos.append('PRON')
if PROPN: pos.append('PROPN')
if ADJ: pos.append('ADJ')
if ADP: pos.append('ADP')
if ADV: pos.append('ADV')
if AUX: pos.append('AUX')
if DET: pos.append('DET')
if INTJ: pos.append('INTJ')
if NUM: pos.append('NUM')
if PART: pos.append('PART')
if PUNCT: pos.append('PUNCT')
if SCONJ: pos.append('SCONJ')
if SYM: pos.append('SYM')
if VERB: pos.append('VERB')
if X: pos.append('X')
self.pos = pos
def filter(self, token, pos):
"""
        Parameters
        ----------
        token: string
            the token to check
        pos: string or None
            universal POS tag of the token, if available
        Return
        ------
        the token if accepted, '' otherwise
"""
if pos is None or pos in self.pos:
return token
return ''
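# --- Editor's note: a brief sketch chaining the pieces above on a toy
# sentence; only classes from this excerpt are used, and pos is passed
# as None because no tagger is wired in here.
if __name__ == '__main__':
    pre = CasePreprocessor(lower=True, keep_name=True)
    flt = TYPEfilter(is_alpha=True)
    tokens = pre('Hello NASA 2024 world')
    kept = [t for t in (flt(tok, None) for tok in tokens) if t]
    print(kept)  # ['hello', 'NASA', 'world']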
# ===========================================================================
# Preprocessing data
# ===========================================================================
# static variables for multiprocessing
def _preprocess_func(doc):
preprocessors = globals()['__preprocessors']
filters = globals()['__filters']
charlevel = globals()['__charlevel']
lang = globals()['__lang']
lemma = globals()['__lemma']
stopwords = globals()['__stopwords']
doc_tokens = []
# preprocessing document
for p in preprocessors:
doc = p(doc)
# auto split if the doc haven't been splitted
if isinstance(doc, string_types):
doc = doc.split(' ')
# ====== start processing ====== #
for token in doc:
if len(token) > 0:
# ignore stopwords if requred
if not stopwords and is_stopword(token, lang):
continue
# check if token is accepted
if filters is not None:
for f in filters:
token = f(token, None)
token = token.strip()
# ignore token if it is removed
if len(token) == 0: continue
# normalize the token
if lemma:
pass
# word-le
|
FTwO-O/pyShadowsocks
|
pyshadowsocks/packet/packet_header.py
|
Python
|
mit
| 320
| 0.003125
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: booopooob@gmail.com
#
# Info:
#
#
#
import abc
from util import FixedDict
class PacketHeader(FixedDict, metaclass=abc.ABCMeta):
@abc.abstractmethod
def to_bytes(self):
pass
@abc.abstractmethod
def from_bytes(self, data):
pass
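# --- Editor's note: a hypothetical concrete subclass showing the
# intended contract; the field names and 2-byte layout are invented,
# and dict-style access on FixedDict is assumed.
import struct

class PingHeader(PacketHeader):
    def to_bytes(self):
        # One version byte followed by one flags byte.
        return struct.pack('!BB', self['version'], self['flags'])

    def from_bytes(self, data):
        self['version'], self['flags'] = struct.unpack('!BB', data[:2])
        return data[2:]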
|
botswana-harvard/edc-dashboard
|
edc_dashboard/templatetags/edc_dashboard_extras.py
|
Python
|
gpl-2.0
| 3,196
| 0.000626
|
from django import template
from django.urls.base import reverse
from urllib.parse import urljoin, parse_qsl, urlencode, unquote
register = template.Library()
class Number:
def __init__(self, number=None, url=None, current=None):
self.number = number
self.url = url
self.active = 'active' if current else ''
def __str__(self):
return self.number
def __repr__(self):
        return f'{self.__class__.__name__}<number={self.number} {self.active}>'
class UrlMaker:
def __init__(self, base=None, querystring=None):
self.base = base
self.querystring = querystring
def url(self, page):
url = urljoin(self.base, str(page)) + '/'
if self.querystring:
return '?'.join([url, self.querystring])
return url
def page_numbers(page, numpages, display=None):
"""Returns a list of x integers (display) relative to the value of n
where n > 0 and the length of the list cannot exceed count.
"""
page_numbers = None
if page and numpages:
min_n = page - 5
min_n = 1 if min_n <= 0 else min_n
max_n = min_n + 9
max_n = numpages if max_n >= numpages else max_n
page_numbers = [x for x in range(min_n, max_n + 1)]
return page_numbers or []
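# --- Editor's note: a quick worked example of the window above.
# page_numbers(7, 20) -> [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# page_numbers(1, 3)  -> [1, 2, 3]  (clipped at both ends)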
@register.inclusion_tag('edc_dashboard/paginator/paginator_row.html', takes_context=True)
def paginator_row(context):
numbers = []
first_url = None
previous_url = None
next_url = None
last_url = None
sub_text = None
paginator_url = context.get('paginator_url')
paginator = context.get('paginator')
page_obj = context.get('page_obj')
querystring = context.get('querystring')
search_term = context.get('search_term')
show = page_obj.has_other_pages()
paginator_url = reverse(
paginator_url, kwargs=context.get('paginator_url_kwargs'))
if querystring:
if '?' in querystring:
querystring = querystring.split('?')[1]
query_dict = parse_qsl(querystring)
querystring = unquote(urlencode(query_dict))
if show:
url_maker = UrlMaker(base=paginator_url, querystring=querystring)
if page_obj.has_previous():
first_url = url_maker.url(1)
previous_url = url_maker.url(page_obj.previous_page_number())
if page_obj.has_next():
next_url = url_maker.url(page_obj.next_page_number())
last_url = url_maker.url(paginator.num_pages)
for page in page_numbers(page_obj.number, paginator.num_pages):
current = page_obj.number == page
url = '#'
if not current:
url = url_maker.url(page)
numbers.append(
Number(number=page, url=url, current=current))
sub_text = (
f'Showing items {page_obj.start_index()} to {page_obj.end_index()} '
f'of {paginator.count}.')
return dict(
page_obj=page_obj,
show=show,
first_url=first_url,
previous_url=previous_url,
next_url=next_url,
last_url=last_url,
numbers=numbers,
search_term=search_term,
sub_text=sub_text)
|
hzdg/django-google-search
|
googlesearch/views.py
|
Python
|
mit
| 2,809
| 0.002848
|
from django.views.generic import TemplateView
#from apiclient.discovery import build
from googleapiclient.discovery import build
from .utils import SearchResults
from . import *
class SearchView(TemplateView):
template_name = "googlesearch/search_results.html"
def get_context_data(self, **kwargs):
context = super(SearchView, self).get_context_data(**kwargs)
service = build("customsearch", GOOGLE_SEARCH_API_VERSION,
developerKey=GOOGLE_SEARCH_API_KEY)
#add a "try" block to see if googleapiclient throws a 400 error
try:
results = service.cse().list(
q=self.request.GET.get('q', ''),
                start=self.page_to_index(),
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
results = SearchResults(results)
pages = self.calculate_pages()
#if googleapiclient raises an error, we need to catch it here
        except Exception:
#run the search again starting with a defined page 1 instead of the "user" defined
            results = service.cse().list(
q=self.request.GET.get('q', ''),
start=1,
num=GOOGLE_SEARCH_RESULTS_PER_PAGE,
cx=GOOGLE_SEARCH_ENGINE_ID,
).execute()
#set some default values used for the context below
page = 1
# previous, current, next pages
pages = [0, 1, 2]
results = SearchResults(results)
""" Set some defaults """
context.update({
'items': [],
'total_results': 0,
'current_page': 0,
'prev_page': 0,
'next_page': 0,
'search_terms': self.request.GET.get('q', ''),
'error': results
})
""" Now parse the results and send back some
useful data """
context.update({
'items': results.items,
'total_results': results.total_results,
'current_page': pages[1],
'prev_page': pages[0],
'next_page': pages[2],
'search_terms': results.search_terms,
})
return context
def calculate_pages(self):
""" Returns a tuple consisting of
the previous page, the current page,
and the next page """
current_page = int(self.request.GET.get('p', 1))
return (current_page - 1, current_page, current_page + 1)
def page_to_index(self, page=None):
""" Converts a page to the start index """
if page is None:
page = self.request.GET.get('p', 1)
return int(page) * int(GOOGLE_SEARCH_RESULTS_PER_PAGE) + 1 - int(GOOGLE_SEARCH_RESULTS_PER_PAGE)
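# --- Editor's note: a short check of the start-index arithmetic above,
# assuming GOOGLE_SEARCH_RESULTS_PER_PAGE == 10 (an assumption):
# page 1 -> 1, page 2 -> 11, page 3 -> 21, i.e. 1-based offsets in
# steps of the page size, matching Google CSE's `start` parameter.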
|
zsdonghao/tensorlayer
|
tensorlayer/app/human_pose_estimation/common.py
|
Python
|
apache-2.0
| 16,077
| 0.001928
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
# Reference:
- [pose_lcn](
https://github.com/rujiewu/pose_lcn)
- [3d-pose-baseline](
https://github.com/una-dinosauria/3d-pose-baseline)
"""
import tensorflow as tf
import numpy as np
import pickle
import matplotlib.pyplot as plt
import os
import matplotlib.gridspec as gridspec
H36M_NAMES = [''] * 17
H36M_NAMES[0] = 'Hip'
H36M_NAMES[1] = 'RHip'
H36M_NAMES[2] = 'RKnee'
H36M_NAMES[3] = 'RFoot'
H36M_NAMES[4] = 'LHip'
H36M_NAMES[5] = 'LKnee'
H36M_NAMES[6] = 'LFoot'
H36M_NAMES[7] = 'Belly'
H36M_NAMES[8] = 'Neck'
H36M_NAMES[9] = 'Nose'
H36M_NAMES[10] = 'Head'
H36M_NAMES[11] = 'LShoulder'
H36M_NAMES[12] = 'LElbow'
H36M_NAMES[13] = 'LHand'
H36M_NAMES[14] = 'RShoulder'
H36M_NAMES[15] = 'RElbow'
H36M_NAMES[16] = 'RHand'
IN_F = 2
IN_JOINTS = 17
OUT_JOINTS = 17
neighbour_matrix = np.array(
[
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 1., 1., 0.],
[1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1., 0., 1., 1., 0.],
[1., 1., 1., 1., 1., 0., 0., 1., 1., 0., 0., 1., 0., 0., 1., 0., 0.],
[1., 1., 1., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0., 1., 1., 0.],
[1., 1., 0., 0., 1., 1., 1., 1., 1., 0., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 0., 0., 1., 0., 0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 1., 0., 0.],
[1., 1., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0.],
[1., 1., 0., 0., 1., 0., 0., 1., 1., 1., 0., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1., 1., 1., 0., 0., 0.],
[1., 1., 1., 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1.],
[1., 1., 0., 0., 1., 0., 0., 1., 1., 1., 0., 1., 0., 0., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 1., 1., 1.]
]
)
ROOT_PATH = '../../examples/app_tutorials/data/'
def mask_weight(weight):
weights = tf.clip_by_norm(weight, 1)
L = neighbour_matrix.T
mask = tf.constant(L)
input_size, output_size = weights.get_shape()
input_size, output_size = int(input_size), int(output_size)
assert input_size % IN_JOINTS == 0 and output_size % IN_JOINTS == 0
in_F = int(input_size / IN_JOINTS)
out_F = int(output_size / IN_JOINTS)
weights = tf.reshape(weights, [IN_JOINTS, in_F, IN_JOINTS, out_F])
mask = tf.reshape(mask, [IN_JOINTS, 1, IN_JOINTS, 1])
weights = tf.cast(weights, dtype=tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
masked_weights = weights * mask
masked_weights = tf.reshape(masked_weights, [input_size, output_size])
return masked_weights
def flip_data(data):
"""
horizontal flip
data: [N, 17*k] or [N, 17, k], i.e. [x, y], [x, y, confidence] or [x, y, z]
Return
result: [2N, 17*k] or [2N, 17, k]
"""
left_joints = [4, 5, 6, 11, 12, 13]
right_joints = [1, 2, 3, 14, 15, 16]
flipped_data = data.copy().reshape((len(data), 17, -1))
flipped_data[:, :, 0] *= -1 # flip x of all joints
flipped_data[:, left_joints + right_joints] = flipped_data[:, right_joints + left_joints]
flipped_data = flipped_data.reshape(data.shape)
result = np.concatenate((data, flipped_data), axis=0)
return result
def unflip_data(data):
"""
Average original data and flipped data
data: [2N, 17*3]
Return
result: [N, 17*3]
"""
left_joints = [4, 5, 6, 11, 12, 13]
right_joints = [1, 2, 3, 14, 15, 16]
data = data.copy().reshape((2, -1, 17, 3))
data[1, :, :, 0] *= -1 # flip x of all joints
data[1, :, left_joints + right_joints] = data[1, :, right_joints + left_joints]
data = np.mean(data, axis=0)
data = data.reshape((-1, 17 * 3))
return data
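# --- Editor's note: a small numerical check of the flip/unflip pair;
# the random data and tolerance are illustrative.
if __name__ == '__main__':
    data = np.random.randn(4, 17 * 3).astype(np.float32)
    doubled = flip_data(data)        # (8, 51): originals then mirrored copies
    restored = unflip_data(doubled)  # (4, 51): mean of original and un-mirrored
    print(np.allclose(restored, data, atol=1e-6))  # True up to float rounding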
class DataReader(object):
def __init__(self):
self.gt_trainset = None
self.gt_testset = None
self.dt_dataset = None
def real_read(self, subset):
file_name = 'h36m_%s.pkl' % subset
print('loading %s' % file_name)
file_path = os.path.join(ROOT_PATH, file_name)
        with open(file_path, 'rb') as f:
gt = pickle.load(f)
return gt
def read_2d(self, which='scale', mode='dt_ft', read_confidence=True):
if self.gt_trainset is None:
self.gt_trainset = self.real_read('train')
if self.gt_testset is None:
self.gt_testset = self.real_read('test')
if mode == 'gt':
trainset = np.empty((len(self.gt_trainset), 17, 2)) # [N, 17, 2]
            testset = np.empty((len(self.gt_testset), 17, 2))  # [N, 17, 2]
for idx, item in enumerate(self.gt_trainset):
trainset[idx] = item['joint_3d_image'][:, :2]
for idx, item in enumerate(self.gt_testset):
testset[idx] = item['joint_3d_image'][:, :2]
if read_confidence:
train_confidence = np.ones((len(self.gt_trainset), 17, 1)) # [N, 17, 1]
test_confidence = np.ones((len(self.gt_testset), 17, 1)) # [N, 17, 1]
elif mode == 'dt_ft':
file_name = 'h36m_sh_dt_ft.pkl'
file_path = os.path.join(ROOT_PATH, 'dataset', file_name)
print('loading %s' % file_name)
with open(file_path, 'rb') as f:
self.dt_dataset = pickle.load(f)
trainset = self.dt_dataset['train']['joint3d_image'][:, :, :2].copy() # [N, 17, 2]
testset = self.dt_dataset['test']['joint3d_image'][:, :, :2].copy() # [N, 17, 2]
if read_confidence:
train_confidence = self.dt_dataset['train']['confidence'].copy() # [N, 17, 1]
test_confidence = self.dt_dataset['test']['confidence'].copy() # [N, 17, 1]
else:
assert 0, 'not supported type %s' % mode
# normalize
if which == 'scale':
# map to [-1, 1]
for idx, item in enumerate(self.gt_trainset):
camera_name = item['camera_param']['name']
if camera_name == '54138969' or camera_name == '60457274':
res_w, res_h = 1000, 1002
elif camera_name == '55011271' or camera_name == '58860488':
res_w, res_h = 1000, 1000
else:
assert 0, '%d data item has an invalid camera name' % idx
trainset[idx, :, :] = trainset[idx, :, :] / res_w * 2 - [1, res_h / res_w]
for idx, item in enumerate(self.gt_testset):
camera_name = item['camera_param']['name']
if camera_name == '54138969' or camera_name == '60457274':
res_w, res_h = 1000, 1002
elif camera_name == '55011271' or camera_name == '58860488':
res_w, res_h = 1000, 1000
else:
assert 0, '%d data item has an invalid camera name' % idx
testset[idx, :, :] = testset[idx, :, :] / res_w * 2 - [1, res_h / res_w]
else:
assert 0, 'not support normalize type %s' % which
if read_confidence:
trainset = np.concatenate((trainset, train_confidence), axis=2) # [N, 17, 3]
testset = np.concatenate((testset, test_confidence), axis=2) # [N, 17, 3]
# reshape
trainset, testset = trainset.reshape((len(trainset), -1)).astype(np.float32), testset.reshape(
(len(testset), -1)
).astype(np.float32)
return trainset, testset
def read_3d(self, which='scale', mode='dt_ft'):
if self.gt_trainset is None:
self.gt_trainset = self.real_read('train')
if self.gt_testset is None:
self.gt_testset = self.real_read('test')
# normalize
train_labels = np.empty((len(self.gt_trainset), 17, 3))
test_labels = np.empt
|
RangerOfFire/faker-cinema
|
faker_cinema/screen.py
|
Python
|
mit
| 1,039
| 0
|
from faker.providers import BaseProvider
class ScreenProvider(BaseProvider):
formats = (
'{{screen_name}} {{screen_number}}',
'{{screen_name}} {{screen_number}} ({{screen_suffix}})',
)
screen_names = (
'Screen',
'Theatre',
'Auditorium',
)
screen_suffixes = (
'3D',
'IMAX',
'VIP',
)
@classmethod
def screen_number(cls):
        return cls.numerify(cls.random_element(('%', '%%')))
@classmethod
def screen_suffix(cls):
return cls.random_element(cls.screen_suffixes)
@classmethod
def screen_name(cls):
return cls.random_element(cls.screen_names)
def screen(self, number=None):
"""
:param number: The screen number to use (default 1 <= n <= 99)
:example: Screen 9 (3D)
"""
pattern = self.random_element(self.formats)
        if number is not None:
pattern = pattern.replace('{{screen_number}}', str(number))
return self.generator.parse(pattern)
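# --- Editor's note: a usage sketch registering the provider with Faker;
# output is random, so the sample values are indicative only.
if __name__ == '__main__':
    from faker import Faker
    fake = Faker()
    fake.add_provider(ScreenProvider)
    print(fake.screen())           # e.g. 'Theatre 42 (IMAX)'
    print(fake.screen(number=9))   # e.g. 'Screen 9'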
|
minusetheartbot/minusetheartbot
|
lib/grove_pi_v1_2_6/grovepi.py
|
Python
|
apache-2.0
| 19,052
| 0.022412
|
#!/usr/bin/env python
#
# GrovePi Python library
# v1.2.2
#
# This file provides the basic functions for using the GrovePi.
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Karan Nayan
# Initial Date: 13 Feb 2014
# Last Updated: 01 June 2015
# http://www.dexterindustries.com/
import smbus
import time
import math
import RPi.GPIO as GPIO
import struct
import sys
debug = 0
if sys.version_info<(3,0):
p_version=2
else:
p_version=3
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
# I2C Address of Arduino
address = 0x04
# Command Format
# digitalRead() command format header
dRead_cmd = [1]
# digitalWrite() command format header
dWrite_cmd = [2]
# analogRead() command format header
aRead_cmd = [3]
# analogWrite() command format header
aWrite_cmd = [4]
# pinMode() command format header
pMode_cmd = [5]
# Ultrasonic read
uRead_cmd = [7]
# Get firmware version
version_cmd = [8]
# Accelerometer (+/- 1.5g) read
acc_xyz_cmd = [20]
# RTC get time
rtc_getTime_cmd = [30]
# DHT Pro sensor temperature
dht_temp_cmd = [40]
# Grove LED Bar commands
# Initialise
ledBarInit_cmd = [50]
# Set orientation
ledBarOrient_cmd = [51]
# Set level
ledBarLevel_cmd = [52]
# Set single LED
ledBarSetOne_cmd = [53]
# Toggle single LED
ledBarToggleOne_cmd = [54]
# Set all LEDs
ledBarSet_cmd = [55]
# Get current state
ledBarGet_cmd = [56]
# Grove 4 Digit Display commands
# Initialise
fourDigitInit_cmd = [70]
# Set brightness, not visible until next cmd
fourDigitBrightness_cmd = [71]
# Set numeric value without leading zeros
fourDigitValue_cmd = [72]
# Set numeric value with leading zeros
fourDigitValueZeros_cmd = [73]
# Set individual digit
fourDigitIndividualDigit_cmd = [74]
# Set individual leds of a segment
fourDigitIndividualLeds_cmd = [75]
# Set left and right values with colon
fourDigitScore_cmd = [76]
# Analog read for n seconds
fourDigitAnalogRead_cmd = [77]
# Entire display on
fourDigitAllOn_cmd = [78]
# Entire display off
fourDigitAllOff_cmd = [79]
# Grove Chainable RGB LED commands
# Store color for later use
storeColor_cmd = [90]
# Initialise
chainableRgbLedInit_cmd = [91]
# Initialise and test with a simple color
chainableRgbLedTest_cmd = [92]
# Set one or more leds to the stored color by pattern
chainableRgbLedSetPattern_cmd = [93]
# set one or more leds to the stored color by modulo
chainableRgbLedSetModulo_cmd = [94]
# sets leds similar to a bar graph, reversible
chainableRgbLedSetLevel_cmd = [95]
# Read the button from IR sensor
ir_read_cmd=[21]
# Set pin for the IR receiver
ir_recv_pin_cmd=[22]
dus_sensor_read_cmd=[10]
dust_sensor_en_cmd=[14]
dust_sensor_dis_cmd=[15]
encoder_read_cmd=[11]
encoder_en_cmd=[16]
encoder_dis_cmd=[17]
flow_read_cmd=[12]
flow_disable_cmd=[13]
flow_en_cmd=[18]
# Grove 433MHz Simple RF link kit - Transmitter commands & subcommands
# Control command, with actual subcommand specified as 2nd byte
tx433_control_cmd = [100]
# Control subcommand: set transmitter PIN
tx433_control_set_pin_subcmd = 1
# Control subcommand: initialize message buffer of specified size (up to 64 bytes)
tx433_control_set_buffer_subcmd = 2
# Control subcommand: send contents of the message buffer
tx433_control_send_buffer_subcmd = 3
# Append 3 bytes to the message buffer
tx433_fill_buffer_cmd=[101]
# This allows us to be more specific about which commands contain unused bytes
unused = 0
# Function declarations of the various functions used for encoding and sending
# data from RPi to Arduino
# Write I2C block
def write_i2c_block(address, block):
try:
return bus.write_i2c_block_data(address, 1, block)
except IOError:
if debug:
print ("IOError")
return -1
# Read I2C byte
def read_i2c_byte(address):
try:
return bus.read_byte(address)
except IOError:
if debug:
print ("IOError")
return -1
# Read I2C block
def read_i2c_block(address):
try:
return bus.read_i2c_block_data(address, 1)
except IOError:
if debug:
print ("IOError")
return -1
# Arduino Digital Read
def digitalRead(pin):
write_i2c_block(address, dRead_cmd + [pin, unused, unused])
time.sleep(.1)
n = read_i2c_byte(address)
return n
# Arduino Digital Write
def digitalWrite(pin, value):
write_i2c_block(address, dWrite_cmd + [pin, value, unused])
return 1
# Setting Up Pin mode on Arduino
def pinMode(pin, mode):
if mode == "OUTPUT":
write_i2c_block(address, pMode_cmd + [pin, 1, unused])
elif mode == "INPUT":
write_i2c_block(address, pMode_cmd + [pin, 0, unused])
return 1
# Read analog value from Pin
def analogRead(pin):
bus.write_i2c_block_data(address, 1, aRead_cmd + [pin, unused, unused])
time.sleep(.1)
bus.read_byte(address)
number = bus.read_i2c_block_data(address, 1)
time.sleep(.1)
return number[1] * 256 + number[2]
# Write PWM
def analogWrite(pin, value):
write_i2c_block(address, aWrite_cmd + [pin, value, unused])
return 1
# Read temp in Celsius from Grove Temperature Sensor
def temp(pin, model = '1.0'):
# each of the sensor revisions use different thermistors, each with their own B value constant
if model == '1.2':
bValue = 4250 # sensor v1.2 uses thermistor ??? (assuming NCP18WF104F03RC until SeeedStudio clarifies)
elif model == '1.1':
bValue = 4250 # sensor v1.1 uses thermistor NCP18WF104F03RC
else:
bValue = 3975 # sensor v1.0 uses thermistor TTC3A103*39H
a = analogRead(pin)
resistance = (float)(1023 - a) * 10000 / a
t = (float)(1 / (math.log(resistance / 10000) / bValue + 1 / 298.15) - 273.15)
return t
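# --- Editor's note: a minimal read loop sketch for the helpers above;
# the pin numbers and threshold are hypothetical (LED on D4, Grove
# temperature sensor on A0, as in the standard GrovePi examples).
def _demo_loop():
    pinMode(4, "OUTPUT")
    while True:
        t = temp(0, model='1.2')
        digitalWrite(4, 1 if t > 25 else 0)  # LED on when warmer than 25 C
        time.sleep(1)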
# Read value from Grove Ultrasonic
def ultrasonicRead(pin):
    write_i2c_block(address, uRead_cmd + [pin, unused, unused])
time.sleep(.2)
read_i2c_byte(address)
number = read_i2c_block(address)
return (number[1] * 256 + number[2])
# Read the firmware version
def version():
write_i2c_block(address, version_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
return "%s.%s.%s" % (number[1], number[2], number[3])
# Read Grove Accelerometer (+/- 1.5g) XYZ value
def acc_xyz():
write_i2c_block(address, acc_xyz_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
if number[1] > 32:
number[1] = - (number[1] - 224)
if number[2] > 32:
number[2] = - (number[2] - 224)
if number[3] > 32:
number[3] = - (number[3] - 224)
return (number[1], number[2], number[3])
# Read from Grove RTC
def rtc_getTime():
write_i2c_block(address, rtc_getTime_cmd + [unused, unused, unused])
time.sleep(.1)
read_i2c_byte(address)
number = read_i2c_block(address)
return number
# Read and return temperature and humidity from Grove DHT Pro
def dht(pin, module_type):
write_i2c_block(address, dht_temp_cmd + [pin, module_type, unused])
# Delay necessary for proper readin
|
Bushstar/UFO-Project
|
test/functional/feature_block.py
|
Python
|
mit
| 60,904
| 0.003235
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test block processing."""
import copy
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script, get_legacy_sigopcount_block
from test_framework.key import CECKey
from test_framework.messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
MAX_BLOCK_BASE_SIZE,
uint256_from_compact,
uint256_from_str,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
MAX_SCRIPT_ELEMENT_SIZE,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_DROP,
OP_FALSE,
OP_HASH160,
OP_IF,
OP_INVALIDOPCODE,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
MAX_BLOCK_SIGOPS = 20000
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
if with_witness:
r += tx.serialize_with_witness()
else:
r += tx.serialize_without_witness()
return r
def normal_serialize(self):
return super().serialize()
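# --- Editor's note: why the serialization above is "bloated": Bitcoin's
# CompactSize encodes a count below 253 in a single byte, while the
# override always emits the 9-byte form (0xFF prefix plus a 64-bit
# length), which nodes reject as non-canonical. Sketch:
#   struct.pack("<B", 2)        -> b'\x02'                                  (1 byte)
#   struct.pack("<BQ", 255, 2)  -> b'\xff\x02\x00\x00\x00\x00\x00\x00\x00'  (9 bytes)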
class FullBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0] # convenience reference to the node
self.bootstrap_p2p() # Add one p2p connection to the node
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
self.spendable_outputs = []
# Create a new block
b0 = self.next_block(0)
self.save_spendable_output()
self.sync_blocks([b0])
# Allow the block to mature
blocks = []
for i in range(99):
blocks.append(self.next_block(5000 + i))
self.save_spendable_output()
self.sync_blocks(blocks)
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
b1 = self.next_block(1, spend=out[0])
self.save_spendable_output()
b2 = self.next_block(2, spend=out[1])
self.save_spendable_output()
self.sync_blocks([b1, b2])
# Fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
self.log.info("Don't reorg to a chain of the same length")
self.move_tip(1)
b3 = self.next_block(3, spend=out[1])
txout_b3 = b3.vtx[1]
self.sync_blocks([b3], False)
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
self.log.info("Reorg to a longer chain")
b4 = self.next_block(4, spend=out[2])
self.sync_blocks([b4])
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
self.move_tip(2)
b5 = self.next_block(5, spend=out[2])
self.save_spendable_output()
self.sync_blocks([b5], False)
self.log.info("Reorg back to the original chain")
b6 = self.next_block(6, spend=out[3])
self.sync_blocks([b6], True)
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain with a double spend, even if it is longer")
self.move_tip(5)
b7 = self.next_block(7, spend=out[2])
self.sync_blocks([b7], False)
        b8 = self.next_block(8, spend=out[4])
self.sync_blocks([b8], False, reconnect=True)
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a block where the miner creates too much coinbase reward")
self.move_tip(6)
b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b9], False, 16, b'bad-cb-amount', reconnect=True)
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer")
self.move_tip(5)
b10 = self.next_block(10, spend=out[3])
self.sync_blocks([b10], False)
b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1)
self.sync_blocks([b11], False, 16, b'bad-cb-amount', reconnect=True)
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# \-> b3 (1) -> b4 (2)
self.log.info("Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)")
self.move_tip(5)
b12 = self.next_block(12, spend=out[3])
self.save_spendable_output()
b13 = self.next_block(13, spend=out[4])
self.save_spendable_output()
b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1)
self.sync_blocks([b12, b13, b14], False, 16, b'bad-cb-amount', reconnect=True)
# New tip should be b13.
assert_equal(node.getbestblockhash(), b13.hash)
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
self.log.info("Accept a block with lots of checksigs")
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
self.move_tip(13)
b15 = self.next_block(15, spend=out[5], script=lots_of_checksigs)
self.save_spendable_output()
self.sync_blocks([b15], True)
self.log.info("Reject a block with too many checksigs")
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
b16 = self.next_block(16, spend=out[6], script=too_many_checksigs)
self.sync_blocks([b16], False, 16, b'bad-blk-sigops', reconnect=True)
# Attempt to spend a transaction created o
|
quattor/aquilon
|
tests/broker/test_del_chassis.py
|
Python
|
apache-2.0
| 3,407
| 0.000587
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del chassis command."""
import unittest
if __name__ == "__main__":
    import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelChassis(TestBrokerCommand):
def test_100_del_ut3c5_used(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[6])
command = "del chassis --chassis ut3c5.aqd-unittest.ms.com"
        out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Chassis ut3c5.aqd-unittest.ms.com is "
"still in use by 3 machines or network devices. "
"Use --clear_slots if you really want to delete it.",
command.split(" "))
def test_101_del_ut3c5(self):
self.dsdb_expect_delete(self.net["unknown0"].usable[6])
command = "del chassis --chassis ut3c5.aqd-unittest.ms.com --clear_slots"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_105_verify_ut3c5(self):
command = "show chassis --chassis ut3c5.aqd-unittest.ms.com"
self.notfoundtest(command.split(" "))
def test_106_del_ut3c5_again(self):
command = ["del_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"DnsRecord ut3c5.aqd-unittest.ms.com, "
"DNS environment internal not found.",
command)
def test_110_del_ut3c1(self):
command = "del chassis --chassis ut3c1.aqd-unittest.ms.com"
self.noouttest(command.split(" "))
def test_115_verify_ut3c1(self):
command = "show chassis --chassis ut3c1.aqd-unittest.ms.com"
self.notfoundtest(command.split(" "))
def test_120_del_ut9_chassis(self):
for i in range(1, 8):
self.dsdb_expect_delete(self.net["ut9_chassis"].usable[i])
command = "del chassis --chassis ut9c%d.aqd-unittest.ms.com" % i
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_125_verify_ut9_chassis(self):
for i in range(1, 6):
command = "show chassis --chassis ut9c%d.aqd-unittest.ms.com" % i
self.notfoundtest(command.split(" "))
def test_130_del_np3c5(self):
self.noouttest(["del_chassis", "--chassis", "np3c5.one-nyp.ms.com"])
def test_140_del_aurora_chassis(self):
self.noouttest(["del_chassis", "--chassis", "oy604c2.ms.com"])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelChassis)
unittest.TextTestRunner(verbosity=2).run(suite)
|
jaywink/federation
|
federation/tests/hostmeta/test_parsers.py
|
Python
|
bsd-3-clause
| 14,602
| 0.001233
|
import json
from unittest.mock import patch
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, int_or_none,
parse_mastodon_document, parse_matrix_document)
from federation.tests.fixtures.hostmeta import (
NODEINFO2_10_DOC, NODEINFO_10_DOC, NODEINFO_20_DOC, STATISTICS_JSON_DOC, MASTODON_DOC, MASTODON_ACTIVITY_DOC,
MASTODON_RC_DOC, MASTODON_DOC_NULL_CONTACT, MATRIX_SYNAPSE_DOC, PLEROMA_MASTODON_API_DOC,
NODEINFO_21_DOC_INVALID_USAGE_COUNTS, MASTODON_DOC_3)
class TestIntOrNone:
def test_returns_negative_values_as_none(self):
assert int_or_none(-1) is None
class TestParseMastodonDocument:
@patch('federation.hostmeta.fetchers.fetch_nodeinfo_document', autospec=True)
    def test_calls_nodeinfo_fetcher_if_pleroma(self, mock_fetch):
        parse_mastodon_document(json.loads(PLEROMA_MASTODON_API_DOC), 'example.com')
mock_fetch.assert_called_once_with('example.com')
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__null_contact_account(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC_NULL_CONTACT), 'example.com')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__rc_version(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_RC_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.1rc1',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__protocols(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC_3), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '3.0.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
class TestParseMatrixDocument:
@patch('federation.hostmeta.parsers.send_document', autospec=True, return_value=(403, None))
def test_parse_matrix_document__signups_closed(self, mock_send):
result = parse_matrix_document(json.loads(MATRIX_SYNAPSE_DOC), 'feneas.org')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'feneas.org',
'name': 'feneas.org',
'open_signups': False,
'protocols': ['matrix'],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'matrix|synapse',
'version': '0.33.8',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.send_document', autospec=True, return_value=(401, None))
def test_parse_matrix_document__signups_open(self, mock_send):
result = parse_matrix_document(json.loads(MATRIX_SYNAPSE_DOC), 'feneas.org')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'feneas.org',
'name': 'feneas.org',
'open_signups': True,
'protocols': ['matrix'],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'matrix|synapse',
'version': '0.33.8',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
class TestParseNodeInfoDocument:
def test_parse_nodeinfo_10_document(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_10_DOC), 'iliketoast.net')
assert result == {
'organization': {
'account': 'podmin@iliketoast.net',
'contact': '',
'name': '',
},
'host': 'iliketoast.net',
'name': 'I Like Toast',
'op
|
fmpr/texttk
|
texttk/texttk.py
|
Python
|
gpl-3.0
| 10,887
| 0.028067
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re, os
import unicodedata
import codecs
import HTMLParser
import nltk
import csv
from nltk.corpus import stopwords
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
from nltk.collocations import *
from nltk.tag import StanfordNERTagger
from sklearn.feature_extraction.text import CountVectorizer
# general purpose functions
def strip_accents_unicode(text):
return ''.join([c for c in unicodedata.normalize('NFKD', text) if not unicodedata.combining(c)])
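# e.g. strip_accents_unicode(u'café crème') -> u'cafe creme'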
def remove_html(text):
return re.sub(r'( ?\.+ )+', ' . ', re.sub(r'<[^>]*>', ' . ', text))
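# e.g. remove_html('<p>Hi</p>Bye') -> ' . Hi . Bye' (tags become sentence breaks)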
def join_urls(text, url_pattern):
m = re.search(url_pattern, text)
while m:
text = re.sub(url_pattern, m.group(3).replace("http://","").replace(".",""), text)
m = re.search(url_pattern, text)
return text
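# e.g. join_urls('visit http://www.foo.com/bar ', url_pattern) -> 'visit wwwfoocom'
# (the URL collapses to a dot-free token; the whitespace terminating the match
# is consumed as well)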
def join_compound_words(text, compound_pattern):
m = re.search(compound_pattern, text)
while m:
text = re.sub(m.group(0), m.group(0).replace("-",""), text)
m = re.search(compound_pattern, text)
return text
def space_out_punctuation(text):
text = re.sub(r',\s', ' , ', text)
text = re.sub(r'\.\.\.\s', ' ... ', text)
text = re.sub(r'\.\s', ' . ', text)
text = re.sub(r';\s', ' ; ', text)
text = re.sub(r':\s', ' : ', text)
text = re.sub(r'\?\s', ' ? ', text)
text = re.sub(r'!\s', ' ! ', text)
text = re.sub(r'"', ' " ', text)
text = re.sub(r'\'', ' \' ', text)
text = re.sub(r'\s\(', ' ( ', text)
text = re.sub(r'\)\s', ' ) ', text)
text = re.sub(r'\s\[', ' [ ', text)
text = re.sub(r'\]\s', ' ] ', text)
text = re.sub(r'-', ' - ', text)
text = re.sub(r'_', ' _ ', text)
text = re.sub(r'\n', ' ', text)
text = re.sub(r'\r', ' ', text)
text = re.sub(r'\s+', ' ', text)
return text
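# e.g. space_out_punctuation('Hi, there (ok). ') -> 'Hi , there ( ok ) . '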
class CustomTokenizer(object):
def __init__(self, tokenizer, stemmer, token_pattern, numeric_pattern):
self.tokenizer = tokenizer
self.stemmer = stemmer
self.token_pattern = token_pattern
self.numeric_pattern = numeric_pattern
def __call__(self, doc):
tokens = []
for t in self.tokenizer(doc):
if self.token_pattern.match(t) and not self.numeric_pattern.match(t):
while "_" in t:
splt = t.split("_")
t = ''.join(splt[1:])
tokens.append(self.stemmer(splt[0]))
tokens.append(self.stemmer(t))
return tokens
#return [self.stemmer(t) for t in self.tokenizer(doc) \
# if self.token_pattern.match(t) and not self.numeric_pattern.match(t)]
class TextPreprocesser(object):
def __init__(self, decode_error='strict', strip_accents='unicode', ignore_list=[], lowercase=True, \
remove_html=True, join_urls=True, use_bigrams=True, use_ner=True, stanford_ner_path="", \
use_lemmatizer=False, max_df=0.95, min_df=1, max_features=None):
self.stanford_ner_path = stanford_ner_path # path to stanford NER
self.decode_error = decode_error # options: {‘strict’, ‘ignore’, ‘replace’}
self.strip_accents = strip_accents # options: {‘ascii’, ‘unicode’, None}
self.ignore_list = ignore_list
self.lowercase = lowercase
self.remove_html = remove_html
self.join_urls = join_urls
self.use_bigrams = use_bigrams
self.use_ner = use_ner
self.use_lemmatizer = use_lemmatizer # use lemmatizer instead of stemmer?
self.max_df = max_df # maximum document frequency
self.min_df = min_df # remove terms that occur in less than min_df documents
self.max_features = max_features # keep only top-N words according to tf across corpus
self.sentence_splitter = PunktSentenceTokenizer().tokenize # Punkt sentence splitter
self.stemmer = SnowballStemmer("english").stem # Snowball stemmer
self.lemmatizer = WordNetLemmatizer().lemmatize # WordNet lemmatizer
self.base_tokenizer = CountVectorizer().build_tokenizer() # sklearn tokenizer works the best, I think...
self.stop_words = stopwords.words("english") # nltk list of 128 stopwords
self.token_pattern = re.compile(r'(?u)\b(\w*[a-zA-Z_]\w+|\w+[a-zA-Z_]\w*)\b') # default value was r'(?u)\b\w\w+\b'
self.numeric_pattern = re.compile(r'^[0-9]+$') # number regex
self.url_pattern = re.compile(r'((http://)?(www\..*?\.\w+).*?)\s')
self.compound_pattern = re.compile(r'\w+(\-\w+)+')
if self.use_lemmatizer:
self.tokenizer = CustomTokenizer(self.base_tokenizer, self.lemmatizer, self.token_pattern, self.numeric_pattern)
else:
self.tokenizer = CustomTokenizer(self.base_tokenizer, self.stemmer, self.token_pattern, self.numeric_pattern)
def find_nbest_bigrams(self, corpus, n, metric, min_freq):
print "finding top-%d bigrams using %s..." % (n, metric)
alltokens = []
simplerTokenizer = CustomTokenizer(self.base_tokenizer, lambda x: x, re.compile(".*"), re.compile("^$"))
for doc in corpus:
for token in [t for t in simplerTokenizer(doc)]:
alltokens.append(token)
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(alltokens)
        finder.apply_freq_filter(min_freq)  # bigrams must appear at least min_freq times
if metric.lower() == "pmi":
            best_bigrams = finder.nbest(bigram_measures.pmi, n)  # doctest: +NORMALIZE_WHITESPACE
elif metric.lower() == "chi_sq":
best_bigrams = finder.nbest(bigram_measures.chi_sq, n) # doctest: +NORMALIZE_WHITESPACE
else:
raise Exception("Unknown metric for bigram finder")
return best_bigrams
def remove_punctuation(self, text):
if not hasattr(self, 'simplerTokenizer'):
            self.simplerTokenizer = CustomTokenizer(self.base_tokenizer, lambda x: x, self.token_pattern, self.numeric_pattern)
tokens = self.simplerTokenizer(text)
return ' '.join(tokens)
def tag_corpus_ner(self, corpus):
if not hasattr(self, 'stanford_ner'):
self.stanford_ner = StanfordNERTagger(self.stanford_ner_path+"classifiers/english.all.3class.distsim.crf.ser.gz",
self.stanford_ner_path+"stanford-ner.jar")
self.stanford_ner._stanford_jar = self.stanford_ner_path+"stanford-ner.jar:"+self.stanford_ner_path+"lib/*"
print "splitting sentences in corpus (for NER)..."
corpus_sentences = []
sentence_to_doc_map = {}
sent_no = 0
for d in xrange(len(corpus)):
for sent in self.sentence_splitter(corpus[d]):
corpus_sentences.append(sent)
sentence_to_doc_map[sent_no] = d
sent_no += 1
tokenized_sentences = []
for sent in corpus_sentences:
tokenized_sentences.append([t for t in re.split(r'\s+', sent) if len(t) > 0])
#tokenized_sentences = [re.split(r'\s+', sent) for sent in corpus_sentences]
print "tagging sentences with Stanford NER..."
tagged_sentences = self.stanford_ner.tag_sents(tokenized_sentences)
# process NER output
tagged_corpus = []
current_doc_no = 0
current_doc = []
for i in xrange(len(tagged_sentences)):
doc_no = sentence_to_doc_map[i]
if doc_no == current_doc_no:
current_doc += tagged_sentences[i]
else:
tagged_corpus.append(current_doc)
current_doc = []
current_doc_no = doc_no
tagged_corpus.append(current_doc)
# get dictionary of named entities per document
named_entities = []
for tagged_doc in tagged_corpus:
tags = {}
current_ne = []
for token, tag in tagged_doc:
if current_ne:
if tag == "O" or (tag != "O" and tag != current_ne[-1][1]):
tags[' '.join([t for t,_ in current_ne])] = current_ne[0][1]
current_ne = []
if tag != "O":
current_ne.append((token, tag))
if current_ne:
tags[' '.join([t for t,_ in current_ne])] = current_ne[0][1]
named_entities.append(tags)
return tagged_corpus, named_entities
def preprocess_corpus(self, corpus):
print "preprocessing corpus..."
print "corpus size:", len(corpus)
# first pass over the corpus: prepare for NER
print "first pass over the corpus...\n\tunescape characters"
if self.remove_html: print "\tremove html"
if self.strip_accents: print "\tstrip accents"
if self.join_urls: print "\tjoin URLs"
print "\tjoin compound words\n\tspace out punctuation"
for d in xrange(len(corpus)):
corpus[d] = HTMLParser.HTMLParser().unescape(corpus[d])+" "
if self.remove_html:
corpus[d] = remove_html(corpus[d])
if s
|
churchlab/vdj
|
bin/imgt2fasta.py
|
Python
|
apache-2.0
| 1,206
| 0.004975
|
#! /usr/bin/env python
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optparse
import vdj
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
if len(args) == 2:
inhandle = open(args[0],'r')
outhandle = open(args[1],'w')
elif len(args) == 1:
inhandle = open(args[0],'r')
outhandle = sys.stdout
elif len(args) == 0:
inhandle = sys.stdin
outhandle = sys.stdout
else:
    raise Exception("Wrong number of arguments.")
for chain in vdj.parse_imgt(inhandle):
# print >>outhandle, chain.format('fasta') # causes chain.description output instead of chain.id
print >>outhandle, ">%s\n%s" % (chain.id,chain.seq)
|
google/feedloader
|
appengine/initiator/main.py
|
Python
|
apache-2.0
| 8,653
| 0.006703
|
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initiator module that sends jobs to Task Queue when triggered.
This module provides a handler to which Cloud Pub/Sub will push a message after
completing processing items. After receiving a message, it makes batch jobs and
sends them to Task Queue.
"""
import datetime
import http
import json
import logging
import os
from typing import Tuple
import flask
from google.auth import exceptions as auth_exceptions
from google.cloud import exceptions as cloud_exceptions
from google.cloud import logging as cloud_logging
import bigquery_client
from models import operation_counts
from models import initiator_task
import pubsub_client
import storage_client
import tasks_client
_SERVICE_ACCOUNT = './config/service_account.json'
_PROJECT_ID = os.environ['PROJECT_ID']
_LOCATION = os.environ['REGION']
_QUEUE_NAME = 'processing-items'
_BATCH_SIZE = 1000
_TRIGGER_COMPLETION_BUCKET = os.environ['TRIGGER_COMPLETION_BUCKET']
_LOCK_BUCKET = os.environ['LOCK_BUCKET']
_DATASET_ID_PROCESSING_FEED_DATA = 'processing_feed_data'
_DATASET_ID_FEED_DATA = 'feed_data'
_TABLE_ID_ITEMS = 'items'
_QUERY_FILEPATH_FOR_UPSERT = 'queries/items_to_upsert.sql'
_QUERY_FILEPATH_FOR_DELETE = 'queries/items_to_delete.sql'
_QUERY_FILEPATH_FOR_PREVENT_EXPIRING = 'queries/items_to_prevent_expiring.sql'
_MAILER_TOPIC_NAME = 'mailer-trigger'
_API_METHOD_INSERT = 'insert'
_API_METHOD_DELETE = 'delete'
_TABLE_SUFFIX_UPSERT = 'upsert'
_TABLE_SUFFIX_DELETE = 'delete'
_TABLE_SUFFIX_PREVENT_EXPIRING = 'prevent_expiring'
OPERATION_UPSERT = 'upsert'
OPERATION_DELETE = 'delete'
OPERATION_EXPIRING = 'expiring'
OPERATIONS = (OPERATION_UPSERT, OPERATION_DELETE, OPERATION_EXPIRING)
_TARGET_URL_INSERT = '/insert_items'
_TARGET_URL_DELETE = '/delete_items'
_TARGET_URL_PREVENT_EXPIRING = '/prevent_expiring_items'
logging_client = cloud_logging.Client()
logging_client.setup_logging(log_level=logging.DEBUG)
app = flask.Flask(__name__)
@app.route('/start', methods=['POST'])
def start() -> Tuple[str, http.HTTPStatus]:
"""Pushes tasks to Cloud Tasks when receiving a task from Cloud Tasks.
The request body must be of a format like:
{
'deleteCount': 1,
'expiringCount': 2,
'upsertCount': 3,
}
The response is an HTTP response with a message.
- 200:
description: the request is successfully processed.
- 400:
description: the request is invalid and failed to be processed.
Returns:
message and HTTP status code.
"""
try:
request_body = json.loads(flask.request.data)
except TypeError:
_cleanup()
return 'Request body is not a string.', http.HTTPStatus.BAD_REQUEST
except ValueError:
_cleanup()
return 'Request body is not in JSON format.', http.HTTPStatus.BAD_REQUEST
logging.info('Request body: %s', request_body)
try:
task = initiator_task.InitiatorTask.from_json(request_body)
except ValueError as error:
logging.error('Error parsing the task JSON: %s', error)
_cleanup()
return 'Message is invalid.', http.HTTPStatus.BAD_REQUEST
logging.info(
'Initiator received a message. upsert_count: %d, delete_count: %d, expiring_count: %d.',
task.upsert_count, task.delete_count, task.expiring_count)
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
any_task_started = False
try:
if task.upsert_count > 0:
_create_processing_table(_TABLE_SUFFIX_UPSERT, _QUERY_FILEPATH_FOR_UPSERT,
timestamp)
_create_tasks_in_cloud_tasks(_TARGET_URL_INSERT, task.upsert_count,
timestamp)
any_task_started = True
if task.delete_count > 0:
_create_processing_table(_TABLE_SUFFIX_DELETE, _QUERY_FILEPATH_FOR_DELETE,
timestamp)
_create_tasks_in_cloud_tasks(_TARGET_URL_DELETE, task.delete_count,
timestamp)
any_task_started = True
if task.expiring_count > 0:
_create_processing_table(_TABLE_SUFFIX_PREVENT_EXPIRING,
_QUERY_FILEPATH_FOR_PREVENT_EXPIRING, timestamp)
_create_tasks_in_cloud_tasks(_TARGET_URL_PREVENT_EXPIRING,
task.expiring_count, timestamp)
any_task_started = True
except cloud_exceptions.GoogleCloudError as gcp_error:
logging.exception('GCP error raised.')
_cleanup()
response_code = gcp_error.code if gcp_error.code else http.HTTPStatus.INTERNAL_SERVER_ERROR
return 'GCP API returned an error.', response_code
except auth_exceptions.GoogleAuthError:
logging.exception('Authorization error raised due to service account.')
_cleanup()
return 'Authorization failed.', http.HTTPStatus.INTERNAL_SERVER_ERROR
# Trigger monitoring cloud composer only when items are sent.
if any_task_started:
_trigger_monitoring_cloud_composer()
else:
# No processing required, so just clean up and send an email.
_cleanup()
_trigger_mailer_for_nothing_processed()
logging.info('Initiator has successfully finished!')
return 'OK', http.HTTPStatus.OK
def _create_processing_table(table_suffix: str, query_filepath: str,
timestamp: str) -> None:
"""Creates a processing table to allow uploader to load items from it.
Args:
table_suffix: name of the BigQuery table suffix.
query_filepath: filepath to a query file.
timestamp: timestamp to identify the run.
"""
try:
query = bigquery_client.generate_query_string(query_filepath, _PROJECT_ID)
except IOError as io_error:
logging.exception(io_error.message)
else:
table_id = f'process_items_to_{table_suffix}_{timestamp}'
bq_client = bigquery_client.BigQueryClient.from_service_account_json(
_SERVICE_ACCOUNT, _DATASET_ID_PROCESSING_FEED_DATA, table_id)
bq_client.initialize_dataset_and_table(query)
def _create_tasks_in_cloud_tasks(target_url: str, items_count: int,
timestamp: str) -> None:
"""Creates tasks in Cloud Tasks to execute uploader.
Args:
target_url: target url of uploader.
items_count: number of items to be processed.
timestamp: timestamp to identify the run.
"""
ct_client = tasks_client.TasksClient.from_service_account_json(
_SERVICE_ACCOUNT,
url=target_url,
project_id=_PROJECT_ID,
location=_LOCATION,
queue_name=_QUEUE_NAME)
ct_client.push_tasks(
total_items=items_count, batch_size=_BATCH_SIZE, timestamp=timestamp)
def _trigger_monitoring_cloud_composer() -> None:
"""Triggers the monitoring application."""
gcs_client = storage_client.StorageClient.from_service_account_json(
_SERVICE_ACCOUNT, _TRIGGER_COMPLETION_BUCKET)
gcs_client.upload_eof()
def _cleanup() -> None:
"""Cleans up resources for the current run to allow another run to start."""
_delete_items_table()
_delete_eof_lock()
def _delete_items_table() -> None:
"""Deletes items table to allow the next run."""
bq_client = bigquery_client.BigQueryClient.from_service_account_json(
_SERVICE_ACCOUNT, _DATASET_ID_FEED_DATA, _TABLE_ID_ITEMS)
bq_client.delete_table()
def _delete_eof_lock() -> None:
"""Deletes EOF.lock file to allow the next run."""
gcs_client = storage_client.StorageClient.from_service_account_json(
_SERVICE_ACCOUNT, _LOCK_BUCKET)
gcs_client.delete_eof_lock()
def _trigger_mailer_for_nothing_processed() -> None:
"""Sends a completion email showing 0 upsert/deletion/expiring calls (sent when no diff)."""
pubsub_publisher = pubsub_client.PubSubClient.from_service_account_json(
_SERVICE_ACCOUNT)
operation_
|
onnodb/CloudBackups
|
trello/__init__.py
|
Python
|
unlicense
| 43
| 0.023256
|
VERSION = "0.1"
from trello.api import *
|
DrSkippy/php_books_database
|
tools/bookdbtool/visualizations.py
|
Python
|
bsd-2-clause
| 1,064
| 0.007519
|
import logging
import pandas as pd
import matplotlib.pyplot as plt
def running_total_comparison(df1, window=15):
fig_size = [12,12]
xlim = [0,365]
ylim = [0,max(df1.Pages)]
years = df1.Year.unique()[-window:].tolist()
y = years.pop(0)
_df = df1.loc[df1.Year == y]
ax = _df.plot("Day", "Pages", figsize=fig_size, xlim=xlim, ylim=ylim, label=y)
for y in years:
_df = df1.loc[df1.Year == y]
ax = _df.plot("Day", "Pages", figsize=fig_size, xlim=xlim, ylim=ylim, ax=ax, label=y)
def yearly_comparisons(df, current_year=2020):
now = df.loc[df.Year == current_year]
fig_size = [12, 6]
ax = df.hist("Pages Read", bins=14, color="darkblue", figsize=fig_size)
plt.axvline(x=int(now["Pages Read"]), color="red")
plt.show()
df.plot.bar(x="Rank", y="Pages Read", width=.95, color="darkblue", figsize=fig_size)
plt.axvline(x=int(now["Rank"]) - 1, color="red")
plt.show()
df.sort_values("Year").plot.bar(x="Year", y="Pages Read", width=.95, color="darkblue", figsize=fig_size)
plt.show()
|
movitto/snap
|
test/filemanagertest.py
|
Python
|
gpl-3.0
| 4,521
| 0.002212
|
#!/usr/bin/python
#
# test/filemanagertest.py unit test suite for snap.filemanager
#
# (C) Copyright 2011 Mo Morsi (mo@morsi.org)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, Version 3,
# as published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import unittest
from snap.filemanager import FileManager
class FileManagerTest(unittest.TestCase):
def testRmAndExists(self):
temp_file_path = os.path.join(os.path.dirname(__file__), "data", "temp-file")
f = open(temp_file_path, 'w')
f.write("foo")
        f.close()
self.assertTrue(os.path.exists(temp_file_path))
self.assertTrue(os.path.isfile(temp_file_path))
self.assertTrue(FileManager.exists(temp_file_path))
FileManager.rm(temp_file_path)
self.assertFalse(os.path.exists(temp_file_path))
self.assertFalse(FileManager.exists(temp_file_path))
def testMv(self):
temp_source_file_path = os.path.join(os.path.dirname(__file__), "data", "temp-source-file")
temp_dest_file_path = os.path.join(os.path.dirname(__file__), "data", "temp-dest-file")
f = open(temp_source_file_path, 'w')
f.write("foo")
f.close()
self.assertTrue(os.path.isfile(temp_source_file_path))
FileManager.mv(temp_source_file_path, temp_dest_file_path)
self.assertFalse(os.path.isfile(temp_source_file_path))
self.assertTrue(os.path.isfile(temp_dest_file_path))
os.remove(temp_dest_file_path)
def testMakeDirAndExists(self):
temp_dir_path = os.path.join(os.path.dirname(__file__), "data", "temp-dir")
FileManager.make_dir(temp_dir_path)
self.assertTrue(os.path.exists(temp_dir_path))
self.assertTrue(os.path.isdir(temp_dir_path))
self.assertTrue(FileManager.exists(temp_dir_path))
os.removedirs(temp_dir_path)
self.assertFalse(os.path.exists(temp_dir_path))
self.assertFalse(FileManager.exists(temp_dir_path))
def testRmDir(self):
temp_dir_path = os.path.join(os.path.dirname(__file__), "data", "temp-dir")
os.mkdir(temp_dir_path)
FileManager.rm_dir(temp_dir_path)
self.assertFalse(os.path.exists(temp_dir_path))
def testReadFile(self):
temp_file_path = os.path.join(os.path.dirname(__file__), "data", "read-file")
f = open(temp_file_path, 'w')
f.write('foobar')
f.close()
c = FileManager.read_file(temp_file_path)
FileManager.rm(temp_file_path)
self.assertEqual("foobar", c)
def testCaptureOutput(self):
out = FileManager.capture_output(['echo', 'yo'])
self.assertEqual("yo\n", out)
def testCaptureOutputWithStdout(self):
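        # 'expr 1 / 0' writes its error message to stderr; the assertion below
        # presumes capture_output folds stderr into the returned text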
out = FileManager.capture_output(['expr', '1', '/', '0'])
self.assertEqual("expr: division by zero\n", out)
def testGetAllFiles(self):
data_path = os.path.join(os.path.dirname(__file__), "data", "tmp")
files = FileManager.get_all_files(include=[data_path])
self.assertIn(os.path.join(data_path, "file1"), files)
self.assertIn(os.path.join(data_path, "subdir", "file2"), files)
files = FileManager.get_all_files(include=[data_path],
exclude=[os.path.join(data_path, 'subdir')])
self.assertIn(os.path.join(data_path, "file1"), files)
self.assertNotIn(os.path.join(data_path, "subdir", "file2"), files)
files = FileManager.get_all_files(include=[data_path], recursive=False)
self.assertIn(os.path.join(data_path, "file1"), files)
self.assertNotIn(os.path.join(data_path, "subdir", "file2"), files)
def testGetAllSubdirectories(self):
data_path = os.path.join(os.path.dirname(__file__), "data")
subdirs = FileManager.get_all_subdirectories(data_path, recursive=True)
self.assertIn(os.path.join(data_path, "tmp"), subdirs)
self.assertIn(os.path.join(data_path, "tmp", "subdir"), subdirs)
subdirs = FileManager.get_all_subdirectories(data_path, recursive=False)
self.assertIn(os.path.join(data_path, "tmp"), subdirs)
self.assertNotIn(os.path.join(data_path, "tmp/subdir"), subdirs)
|
jerkos/mzOS
|
mzos/exp_design.py
|
Python
|
mit
| 3,317
| 0.00211
|
from __future__ import absolute_import
from collections import defaultdict as ddict
import os.path as op
def enum(**enums):
"""#enumeration
#backward compatible
:param enums:
"""
return type('Enum', (), enums)
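# Minimal usage sketch (illustrative): members become plain class attributes,
# e.g. Color = enum(RED=0, BLUE=1); Color.BLUE == 1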
IONISATION_MODE = enum(NEG=-1, POS=1)
class ExperimentalSettings(object):
"""
:param mz_tol_ppm:
:param ionisation_mode:
:param is_dims_experiment:
"""
ADDUCTS_POS = op.abspath("mzos/ressources/POS_ADDUCTS_IMS.csv")
    ADDUCTS_NEG = op.abspath("mzos/ressources/NEG_ADDUCTS_IMS.csv")
FRAGMENTS = op.abspath("mzos/ressources/FRAGMENTS_IMS.csv")
def __init__(self, mz_tol_ppm, polarity, is_dims_exp,
frag_conf=None,
neg_adducts_conf=None,
pos_adducts_conf=None):
self.samples = set()
        self.polarity = polarity  # note: an IONISATION_MODE enum value
self.mz_tol_ppm = mz_tol_ppm
self.is_dims_exp = is_dims_exp
# self.databases = databases
self.group_by_id = ddict(set)
self.group_by_sample = {}
# setting isos file, same for both polarity
# self.isos_file = ExperimentalSettings.ISOS
# setting good frags_file
self.frags_file = frag_conf or ExperimentalSettings.FRAGMENTS
self.adducts_file = neg_adducts_conf or ExperimentalSettings.ADDUCTS_NEG \
if polarity == IONISATION_MODE.NEG else pos_adducts_conf or ExperimentalSettings.ADDUCTS_POS
def get_frags(self):
"""
:return:
"""
lines = list()
with open(self.frags_file) as f:
lines += [l.split(",") for l in f.readlines()[1:]]
return [((float(l[3]), 1), l[0]) for l in lines]
def get_adducts(self):
"""
:return:
"""
lines = list()
with open(self.adducts_file) as f:
lines += [l.split(",") for l in f.readlines()[1:]]
return [((float(l[3]), 1), l[0]) for l in lines]
def get_mass_to_check(self):
"""
:return:
"""
if self.is_dims_exp:
return self.get_frags()
return self.get_adducts() + self.get_frags()
def create_group(self, id_, samples):
"""
:param id_:
:param samples:
:return:
"""
group = Group(id_, samples)
for s in list(samples):
self.group_by_sample[s] = group
self.group_by_id[id_] = group
        self.samples |= set(samples)  # union() returns a new set; update in place
return group
def get_group(self, id_):
"""
:param id_:
:return:
"""
return self.group_by_id.get(id_)
def get_group_of(self, sample):
"""
:param sample:
:return: return group or None
"""
return self.group_by_sample.get(sample)
def get_group_id_of(self, sample):
"""
:param sample:
:return:
"""
group = self.get_group_of(sample)
if group is None:
return None
return group.name_id
class Group(list):
"""
:param name_id:
:param samples:
:param description:
"""
def __init__(self, name_id, samples, description=""):
super(Group, self).__init__()
self.samples = samples
self.description = description
self.name_id = name_id
|
OpusVL/odoo
|
addons/board/__openerp__.py
|
Python
|
agpl-3.0
| 1,697
| 0.002357
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Dashboards',
'version': '1.0',
'category': 'Hidden',
'description': """
Lets the user create a custom dashboard.
========================================
Allows users to create custom dashboard.
""",
'author': 'OpenERP SA',
'depends': ['base', 'web'],
'data': [
'security/ir.model.access.csv',
'board_view.xml',
'board_mydashboard_view.xml',
'views/board.xml',
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'auto_install': False,
'images': ['images/1_dashboard_definition.jpeg','images/2_publish_note.jpeg','images/3_admin_dashboard.jpeg',],
}
|
mganeva/mantid
|
scripts/test/Muon/utilities/thread_model_test.py
|
Python
|
gpl-3.0
| 5,303
| 0.001509
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.py3compat import mock
from Muon.GUI.Common.thread_model import ThreadModel
from Muon.GUI.Common import mock_widget
class testModelWithoutExecute:
def __init__(self):
pass
def output(self):
pass
class testModelWithoutOutput:
def __init__(self):
pass
def execute(self):
pass
class testModelWithoutLoadData:
def __init__(self):
pass
def execute(self):
pass
def output(self):
pass
class testModel:
def __init__(self):
self._data = None
def loadData(self, data):
self._data = data
def output(self):
pass
def execute(self):
pass
class LoadFileWidgetViewTest(unittest.TestCase):
class Runner:
"""This runner class creates a main event loop for threaded code to run within (otherwise the threaded
code will not connect signals/slots properly).
The finished signal of a QThread is connected to the finished method below"""
QT_APP = mock_widget.mockQapp()
def __init__(self, thread_model):
if thread_model:
thread_model.start()
def setUp(self):
patcher = mock.patch('Muon.GUI.Common.thread_model.warning')
self.addCleanup(patcher.stop)
self.warning_box_patcher = patcher.start()
self.model = testModel()
self.thread = ThreadModel(self.model)
def mock_model(self):
model = mock.Mock()
model.loadData = mock.Mock(side_effect=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
model.execute = mock.Mock()
model.output = mock.Mock()
return model
# ------------------------------------------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------------------------------------------
def test_that_loadData_called_in_model_with_correct_inputs(self):
        self.model.loadData = mock.Mock()
self.thread.loadData([1, 2, 3, 4, 5])
self.assertEqual(self.model.loadData.call_count, 1)
self.assertEqual(self.model.loadData.call_args_list[0][0][0], [1, 2, 3, 4, 5])
def test_that_execute_is_called_in_model_when_thread_is_started(self):
self.model.execute = mock.Mock()
self.Runner(self.thread)
self.thread._thread.wait()
self.Runner.QT_APP.processEvents()
self.assertEqual(self.model.execute.call_count, 1)
def test_that_output_is_called_if_thread_executes_successfully(self):
self.model.execute = mock.Mock()
self.model.output = mock.Mock()
self.Runner(self.thread)
self.thread._thread.wait()
self.Runner.QT_APP.processEvents()
self.assertEqual(self.model.output.call_count, 1)
def test_that_starting_and_finishing_callbacks_are_called_when_thread_starts_and_finishes(self):
start_slot = mock.Mock()
end_slot = mock.Mock()
self.thread.threadWrapperSetUp(start_slot, end_slot)
self.Runner(self.thread)
self.thread._thread.wait()
self.Runner.QT_APP.processEvents()
self.assertEqual(start_slot.call_count, 1)
self.assertEqual(end_slot.call_count, 1)
def test_that_AttributeError_raised_if_trying_to_load_data_into_model_without_loadData_method(self):
model = testModelWithoutLoadData()
thread = ThreadModel(model)
with self.assertRaises(AttributeError):
thread.loadData(None)
def test_that_attribute_error_raised_if_model_does_not_contain_execute_method(self):
model = testModelWithoutExecute()
with self.assertRaises(AttributeError):
ThreadModel(model)
def test_that_attribute_error_raised_if_model_does_not_contain_output_method(self):
        model = testModelWithoutOutput()
with self.assertRaises(AttributeError):
ThreadModel(model)
def test_that_tearDown_function_called_automatically(self):
start_slot = mock.Mock()
end_slot = mock.Mock()
self.thread.threadWrapperSetUp(start_slot, end_slot)
self.Runner(self.thread)
self.thread._thread.wait()
self.Runner.QT_APP.processEvents()
self.assertEqual(start_slot.call_count, 1)
self.assertEqual(end_slot.call_count, 1)
def test_that_message_box_called_when_execute_throws_even_without_setup_and_teardown_methods(self):
def raise_error():
raise ValueError()
self.model.execute = mock.Mock(side_effect=raise_error)
self.Runner(self.thread)
self.thread._thread.wait()
self.Runner.QT_APP.processEvents()
self.assertEqual(self.warning_box_patcher.call_count, 1)
def test_that_passing_non_callables_to_setUp_throws_AssertionError(self):
with self.assertRaises(AssertionError):
self.thread.threadWrapperSetUp(1, 2)
if __name__ == '__main__':
unittest.main(buffer=False, verbosity=2)
|
JustJokerX/PaperCrawler
|
COLT/COLT2015.py
|
Python
|
gpl-3.0
| 1,083
| 0
|
# coding=utf-8
"""
This script crawls the JMLR proceedings page for COLT 2015 and downloads the paper PDFs.
"""
import __init__
import os
import re
import urllib
from utility import prgbar
def get_html(url):
"""Get the html """
page = urllib.urlopen(url)
html = page.read()
return html
def get_pdf(html):
""" xxx"""
reg = r'href="(.+?\.pdf)">pdf'
pdfre = re.compile(reg)
pdflist = re.findall(pdfre, html)
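    # e.g. an anchor such as  href="somepaper.pdf">pdf  (illustrative name)
    # yields the capture 'somepaper.pdf'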
dir_name = 'COLT2015'
maxrows = len(pdflist)
pbar = prgbar.ProgressBar(total=maxrows)
    if not os.path.exists(dir_name):
os.mkdir(dir_name)
for idx, pdfurl in enumerate(pdflist):
filename = dir_name + '/' + pdfurl
pbar.log('http://jmlr.org/proceedings/papers/v40/' + pdfurl)
        if os.path.exists(filename):
pbar.log('Exist')
else:
urllib.urlretrieve(
'http://jmlr.org/proceedings/papers/v40/' + pdfurl, filename)
pbar.update(index=(idx + 1))
pbar.finish()
if __name__ == '__main__':
HTML = get_html("http://jmlr.org/proceedings/papers/v40/")
    print(get_pdf(HTML))
|
Gaia3D/QGIS
|
python/plugins/processing/core/ProcessingConfig.py
|
Python
|
gpl-2.0
| 8,398
| 0.001072
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingConfig.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtCore import QPyNullVariant, QCoreApplication, QSettings
from PyQt4.QtGui import QIcon
from processing.tools.system import tempFolder
class ProcessingConfig:
OUTPUT_FOLDER = 'OUTPUT_FOLDER'
RASTER_STYLE = 'RASTER_STYLE'
VECTOR_POINT_STYLE = 'VECTOR_POINT_STYLE'
VECTOR_LINE_STYLE = 'VECTOR_LINE_STYLE'
VECTOR_POLYGON_STYLE = 'VECTOR_POLYGON_STYLE'
SHOW_RECENT_ALGORITHMS = 'SHOW_RECENT_ALGORITHMS'
USE_SELECTED = 'USE_SELECTED'
USE_FILENAME_AS_LAYER_NAME = 'USE_FILENAME_AS_LAYER_NAME'
KEEP_DIALOG_OPEN = 'KEEP_DIALOG_OPEN'
SHOW_DEBUG_IN_DIALOG = 'SHOW_DEBUG_IN_DIALOG'
RECENT_ALGORITHMS = 'RECENT_ALGORITHMS'
PRE_EXECUTION_SCRIPT = 'PRE_EXECUTION_SCRIPT'
POST_EXECUTION_SCRIPT = 'POST_EXECUTION_SCRIPT'
SHOW_CRS_DEF = 'SHOW_CRS_DEF'
WARN_UNMATCHING_CRS = 'WARN_UNMATCHING_CRS'
settings = {}
settingIcons = {}
@staticmethod
def initialize():
icon = QIcon(os.path.dirname(__file__) + '/../images/alg.png')
ProcessingConfig.settingIcons['General'] = icon
        ProcessingConfig.addSetting(Setting(
            ProcessingConfig.tr('General'),
            ProcessingConfig.SHOW_DEBUG_IN_DIALOG,
            ProcessingConfig.tr('Show extra info in Log panel'), True))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.KEEP_DIALOG_OPEN,
ProcessingConfig.tr('Keep dialog open after running an algorithm'), False))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.USE_SELECTED,
ProcessingConfig.tr('Use only selected features'), True))
        ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.USE_FILENAME_AS_LAYER_NAME,
ProcessingConfig.tr('Use filename as layer name'), False))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.SHOW_RECENT_ALGORITHMS,
ProcessingConfig.tr('Show recently executed algorithms'), True))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.OUTPUT_FOLDER,
ProcessingConfig.tr('Output folder'), tempFolder()))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.SHOW_CRS_DEF,
ProcessingConfig.tr('Show layer CRS definition in selection boxes'), True))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.WARN_UNMATCHING_CRS,
ProcessingConfig.tr("Warn before executing if layer CRS's do not match"), True))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.RASTER_STYLE,
ProcessingConfig.tr('Style for raster layers'), ''))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.VECTOR_POINT_STYLE,
ProcessingConfig.tr('Style for point layers'), ''))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.VECTOR_LINE_STYLE,
ProcessingConfig.tr('Style for line layers'), ''))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.VECTOR_POLYGON_STYLE,
ProcessingConfig.tr('Style for polygon layers'), ''))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.PRE_EXECUTION_SCRIPT,
ProcessingConfig.tr('Pre-execution script'), ''))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.POST_EXECUTION_SCRIPT,
ProcessingConfig.tr('Post-execution script'), ''))
ProcessingConfig.addSetting(Setting(
ProcessingConfig.tr('General'),
ProcessingConfig.RECENT_ALGORITHMS,
ProcessingConfig.tr('Recent algs'), '', hidden=True))
@staticmethod
def setGroupIcon(group, icon):
ProcessingConfig.settingIcons[group] = icon
@staticmethod
def getGroupIcon(group):
if group == ProcessingConfig.tr('General'):
return QIcon(os.path.dirname(__file__) + '/../images/alg.png')
if group in ProcessingConfig.settingIcons:
return ProcessingConfig.settingIcons[group]
else:
            return QIcon(os.path.dirname(__file__) + '/../images/alg.png')
@staticmethod
def addSetting(setting):
ProcessingConfig.settings[setting.name] = setting
@staticmethod
def removeSetting(name):
del ProcessingConfig.settings[name]
@staticmethod
def getSettings():
'''Return settings as a dict with group names as keys and lists of settings as values'''
settings = {}
for setting in ProcessingConfig.settings.values():
if setting.group not in settings:
group = []
settings[setting.group] = group
else:
group = settings[setting.group]
group.append(setting)
return settings
@staticmethod
def readSettings():
for setting in ProcessingConfig.settings.values():
setting.read()
@staticmethod
def getSetting(name):
if name in ProcessingConfig.settings.keys():
v = ProcessingConfig.settings[name].value
if isinstance(v, QPyNullVariant):
v = None
return v
else:
return None
@staticmethod
def setSettingValue(name, value):
if name in ProcessingConfig.settings.keys():
ProcessingConfig.settings[name].value = value
ProcessingConfig.settings[name].save()
@staticmethod
def tr(string, context=''):
if context == '':
context = 'ProcessingConfig'
return QCoreApplication.translate(context, string)
class Setting:
"""A simple config parameter that will appear on the config dialog.
"""
STRING = 0
FILE = 1
FOLDER = 2
def __init__(self, group, name, description, default, hidden=False, valuetype=None):
self.group = group
self.name = name
self.qname = "Processing/Configuration/" + self.name
self.description = description
self.default = default
self.value = default
self.hidden = hidden
self.valuetype = valuetype
def read(self):
qsettings = QSettings()
value = qsettings.value(self.qname, None)
if value is not None:
if isinstance(self.value, bool):
value =
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
regex/remove-all-characters.py
|
Python
|
agpl-3.0
| 413
| 0.007264
|
import re
import string
def replaceIt(file):
out = ''
with open(file, 'r') as f:
for line in f:
repl = '[' + re.escape(''.join(string.punctuation)) + ']'
out += re.sub(repl, '', line)
return out
print (replaceIt('test.txt'))
# output
'''
cobaanu231339 91102
1212086mcmnad0ca
'''
# test.txt
'''
coba*())@*&#anu;,231339 91102-$@!%!
''..,121208***&@6mcmnad0ca
'''
|
ivknv/yadisk
|
tests/__init__.py
|
Python
|
lgpl-3.0
| 74
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .yadisk_test import *
|
cedadev/django-sizefield
|
setup.py
|
Python
|
lgpl-3.0
| 1,205
| 0.00249
|
#!/usr/bin/python
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
setup(
name='django-sizefield',
version='0.10.ceda',
author='Mathieu Leplatre',
    author_email='contact@mathieu-leplatre.info',
url='https://github.com/leplatrem/django-sizefield',
download_url="http://pypi.python.org/pypi/django-sizefield/",
description="A model field to store a file size, whose edition and display shows units.",
long_description=open(os.path.join(here, 'README.rst')).read() + '\n\n' +
        open(os.path.join(here, 'CHANGES')).read(),
    license='LGPL, see LICENSE file.',
install_requires=[
'Django',
],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=['Topic :: Utilities',
'Natural Language :: English',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7'],
)
|
partofthething/home-assistant
|
homeassistant/components/ipp/config_flow.py
|
Python
|
apache-2.0
| 7,235
| 0.001382
|
"""Config flow to configure the IPP integration."""
import logging
from typing import Any, Dict, Optional
from pyipp import (
IPP,
IPPConnectionError,
IPPConnectionUpgradeRequired,
IPPError,
IPPParseError,
IPPResponseError,
IPPVersionNotSupportedError,
)
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_LOCAL_POLL, ConfigFlow
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import CONF_BASE_PATH, CONF_SERIAL, CONF_UUID
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: HomeAssistantType, data: dict) -> Dict[str, Any]:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
ipp = IPP(
host=data[CONF_HOST],
port=data[CONF_PORT],
base_path=data[CONF_BASE_PATH],
tls=data[CONF_SSL],
verify_ssl=data[CONF_VERIFY_SSL],
session=session,
)
printer = await ipp.printer()
return {CONF_SERIAL: printer.info.serial, CONF_UUID: printer.info.uuid}
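# Illustrative call (hypothetical host and values, not part of the original
# module):
#   info = await validate_input(hass, {
#       CONF_HOST: "192.168.1.31", CONF_PORT: 631, CONF_BASE_PATH: "/ipp/print",
#       CONF_SSL: False, CONF_VERIFY_SSL: False})
#   # -> {CONF_SERIAL: "XYZ123", CONF_UUID: "cfe92100-67c4-11d4-a45f-f8d027761251"}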
class IPPFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle an IPP config flow."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Set up the instance."""
self.discovery_info = {}
async def async_step_user(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by the user."""
if user_input is None:
return self._show_setup_form()
try:
info = await validate_input(self.hass, user_input)
except IPPConnectionUpgradeRequired:
return self._show_setup_form({"base": "connection_upgrade"})
except (IPPConnectionError, IPPResponseError):
_LOGGER.debug("IPP Connection/Response Error", exc_info=True)
return self._show_setup_form({"base": "cannot_connect"})
except IPPParseError:
_LOGGER.debug("IPP Parse Error", exc_info=True)
return self.async_abort(reason="parse_error")
except IPPVersionNotSupportedError:
return self.async_abort(reason="ipp_version_error")
except IPPError:
_LOGGER.debug("IPP Error", exc_info=True)
return self.async_abort(reason="ipp_error")
unique_id = user_input[CONF_UUID] = info[CONF_UUID]
if not unique_id and info[CONF_SERIAL]:
_LOGGER.debug(
"Printer UUID is missing from IPP response. Falling back to IPP serial number"
)
unique_id = info[CONF_SERIAL]
elif not unique_id:
_LOGGER.debug("Unable to determine unique id from IPP response")
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured(updates={CONF_HOST: user_input[CONF_HOST]})
return self.async_create_entry(title=user_input[CONF_HOST], data=user_input)
async def async_step_zeroconf(self, discovery_info: ConfigType) -> Dict[str, Any]:
"""Handle zeroconf discovery."""
port = discovery_info[CONF_PORT]
zctype = discovery_info["type"]
name = discovery_info[CONF_NAME].replace(f".{zctype}", "")
tls = zctype == "_ipps._tcp.local."
base_path = discovery_info["properties"].get("rp", "ipp/print")
self.context.update({"title_placeholders": {"name": name}})
self.discovery_info.update(
{
CONF_HOST: discovery_info[CONF_HOST],
CONF_PORT: port,
CONF_SSL: tls,
CONF_VERIFY_SSL: False,
CONF_BASE_PATH: f"/{base_path}",
CONF_NAME: name,
CONF_UUID: discovery_info["properties"].get("UUID"),
}
)
try:
info = await validate_input(self.hass, self.discovery_info)
except IPPConnectionUpgradeRequired:
return self.async_abort(reason="connection_upgrade")
except (IPPConnectionError, IPPResponseError):
_LOGGER.debug("IPP Connection/Response Error", exc_info=True)
return self.async_abort(reason="cannot_connect")
except IPPParseError:
_LOGGER.debug("IPP Parse Error", exc_info=True)
return self.async_abort(reason="parse_error")
except IPPVersionNotSupportedError:
return self.async_abort(reason="ipp_version_error")
except IPPError:
_LOGGER.debug("IPP Error", exc_info=True)
return self.async_abort(reason="ipp_error")
unique_id = self.discovery_info[CONF_UUID]
if not unique_id and info[CONF_UUID]:
_LOGGER.debug(
"Printer UUID is missing from discovery info. Falling back to IPP UUID"
)
unique_id = self.discovery_info[CONF_UUID] = info[CONF_UUID]
elif not unique_id and info[CONF_SERIAL]:
_LOGGER.debug(
"Printer UUID is missing from discovery info and IPP response. Falling back to IPP serial number"
)
unique_id = info[CONF_SERIAL]
elif not unique_id:
_LOGGER.debug(
"Unable to determine unique id from discovery info and IPP response"
)
if unique_id:
await self.async_set_unique_id(unique_id)
            self._abort_if_unique_id_configured(
updates={
CONF_HOST: self.discovery_info[CONF_HOST],
CONF_NAME: self.discovery_info[CONF_NAME],
},
)
await self._async_handle_discovery_without_unique_id()
return await self.async_step_zeroconf_confirm()
    async def async_step_zeroconf_confirm(
self, user_input: ConfigType = None
) -> Dict[str, Any]:
"""Handle a confirmation flow initiated by zeroconf."""
if user_input is None:
return self.async_show_form(
step_id="zeroconf_confirm",
description_placeholders={"name": self.discovery_info[CONF_NAME]},
errors={},
)
return self.async_create_entry(
title=self.discovery_info[CONF_NAME],
data=self.discovery_info,
)
def _show_setup_form(self, errors: Optional[Dict] = None) -> Dict[str, Any]:
"""Show the setup form to the user."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=631): int,
vol.Required(CONF_BASE_PATH, default="/ipp/print"): str,
vol.Required(CONF_SSL, default=False): bool,
vol.Required(CONF_VERIFY_SSL, default=False): bool,
}
),
errors=errors or {},
)
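# For reference, a sketch of the zeroconf discovery_info dict this flow
# consumes (assumed shape, covering only the keys read above; all values
# are illustrative):
#
#   {
#       "host": "192.168.1.31",
#       "port": 631,
#       "type": "_ipp._tcp.local.",
#       "name": "EPSON XP-6000 Series._ipp._tcp.local.",
#       "properties": {"rp": "ipp/print", "UUID": "cfe92100-67c4-11d4-a45f-f8d027761251"},
#   }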
|
mwhoffman/reggie
|
tests/test_core_priors.py
|
Python
|
bsd-2-clause
| 1,591
| 0.001257
|
"""
Tests for priors.
"""
# pylint: disable=missing-docstring
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import numpy.testing as nt
import scipy.optimize as spop
import reggie.core.priors as priors
### BASE TEST CLASS ###########################################################
class PriorTest(object):
def __init__(self, prior):
self.prior = prior
def test_repr(self):
_ = repr(self.prior)
def test_bounds(self):
bshape = np.shape(self.prior.bounds)
assert bshape == (2,) or bshape == (self.prior.ndim, 2)
def test_sample(self):
assert np.shape(self.prior.sample()) == (self.prior.ndim,)
assert np.shape(self.prior.sample(5)) == (5, self.prior.ndim)
def test_logprior(self):
for theta in self.prior.sample(5, 0):
g1 = spop.approx_fprime(theta, self.prior.get_logprior, 1e-8)
_, g2 = self.prior.get_logprior(theta, True)
nt.assert_allclose(g1, g2, rtol=1e-6)
### PER-INSTANCE TESTS ######################################
|
##################
class TestUniform(PriorTest):
def __init__(self):
PriorTest.__init__(self, priors.Uniform([0, 0], [1, 1]))
class TestNormal(PriorTest):
def __init__(self):
PriorTest.__init__(self, priors.Normal([0, 0
|
], [1, 1]))
class TestLogNormal(PriorTest):
def __init__(self):
PriorTest.__init__(self, priors.LogNormal([0, 0], [1, 1]))
def test_uniform():
nt.assert_raises(ValueError, priors.Uniform, 0, -1)
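# Illustrative sketch (not part of the suite): test_logprior above validates an
# analytic gradient against scipy's finite-difference estimate. The same
# pattern, standalone, for a toy function f(x) = sum(x**2) whose gradient is
# 2*x:
#
#   import numpy as np
#   import scipy.optimize as spop
#   f = lambda x: np.sum(x ** 2)
#   x0 = np.ones(3)
#   g_fd = spop.approx_fprime(x0, f, 1e-8)   # finite-difference gradient
#   np.testing.assert_allclose(g_fd, 2 * x0, rtol=1e-6)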
|
ratt-ru/PyMORESANE
|
tests/test_iuwt_convolution.py
|
Python
|
gpl-2.0
| 107
| 0.009346
|
import pymoresane.iuwt_convolution
import
|
unittes
|
t
class TestIuwtConvolution(unittest.TestCase):
pass
|
andrescollazos/sistemas-distribuidos
|
Taller3/cliente.py
|
Python
|
gpl-3.0
| 739
| 0.004071
|
# coding=u
|
tf-8
# Import ServerProxy from the xmlrpclib module.
from xmlrpclib import ServerProxy
# Connect to the host on port 5005
s = ServerProxy('http://localhost:5005')
# Repeatedly read two numbers and an operation, and call the remote function.
salir = False
while not salir:
num1 = input("Digite un numero: ")
num2 = input("Digite otro numero: ")
op1 = raw_input("Operacion? (+)(-)(*)(/)(^)(sqr)(log): ")
try:
print "\n
|
The answer is: ", s.operacion(num1, num2, op1)
except OverflowError:
print "\nLos numeros ingresados son muy grandes! Exceden capacidad!"
salir = raw_input("\n\nDesea continuar? (Y/N): ")
if salir == "N" or salir == "n":
salir = True
else:
salir = False
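# For reference, a minimal matching server sketch (assumed, not part of this
# file): it would expose an 'operacion' function on port 5005.
#
#   from SimpleXMLRPCServer import SimpleXMLRPCServer
#
#   def operacion(a, b, op):
#       if op == '+':
#           return a + b
#       # ... remaining operators elided ...
#
#   server = SimpleXMLRPCServer(('localhost', 5005))
#   server.register_function(operacion)
#   server.serve_forever()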
|
dbarbier/privot
|
python/test/t_coupling_tools.py
|
Python
|
lgpl-3.0
| 21,167
| 0.005055
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from openturns import coupling_tools
import os
import time
import sys
wanted_lines = '# ooo\nE=@E\nE1=@E\nFE1=@F#oo\nZ=@Z@Z\n# ooo\n'
semi_parsed = '# ooo\nE=2\nE1=2\nFE1=@F#oo\nZ=@Z@Z\n# ooo\n'
parsed = '# ooo\nE=1.6\nE1=1.6\nFE1=5#oo\nZ=66\n# ooo\n'
# how many MB for the perf file
#howbig = 3024
howbig = 256
max_time = divmod(howbig, 5)[0]
def create_template():
template_name = 'template.in'
template_handle = open(template_name, 'wb')
template_handle.write(wanted_lines.encode())
template_handle.close()
return template_name
def create_big_template():
template_name = 'template_big.in'
template_handle = open(template_name, 'wb')
    print('create template file of ' + str(howbig) + 'MB')
template_handle.write(wanted_lines.encode())
for i in range(howbig):
for i in range(1024):
# line of 1024 octets
template_handle.write(b'u'*1024)
template_handle.write(b'\n')
template_handle.write(b'# ooo\n')
template_handle.close()
return template_name
def remove_file(filename, quiet=False):
if quiet:
try:
os.remove(filename)
except:
pass
else:
os.remove(filename)
def check_outfile(filename, wanted_result):
""" wanted_result: a string """
is_ok = True
handle = open(filename)
for wanted_line, result_line in zip(wanted_result.splitlines(True), handle):
if wanted_line != result_line:
print('Aaaaarg, result is not what we wanted (result:' + \
result_line + ', should be:' + wanted_line.decode() + ')')
is_ok = False
handle.close()
if is_ok:
print('check ' + filename + ': ok')
else:
exit(1)
#return is_ok
def check_replace():
print("=== " + sys._getframe().f_code.co_name)
print("= check replace std")
template = create_template()
template_out = template + ".replaced"
coupling_tools.replace(infile=template, outfile=template_out,
tokens=["@E"], values=[2])
check_outfile(template_out, semi_parsed)
remove_file(template_out)
remove_file(template)
print("= check replace more vars")
template = create_template()
coupling_tools.replace(infile=template, outfile=template_out,
tokens=["@E", "@F", "@Z"],
values=[1.6, 5, 6])
check_outfile(template_out, parsed)
remove_file(template_out)
remove_file(template)
print("= check replace inplace")
template = create_template()
coupling_tools.replace(infile=template, outfile=template,
tokens=["@E", "@F", "@Z"], values=[1.6, 5, 6])
check_outfile(template, parsed)
remove_file(template)
print("= check replace inplace with None")
template = create_template()
coupling_tools.replace(infile=template, outfile=None,
tokens=["@E", "@F", "@Z"], values=[1.6, 5, 6])
check_outfile(template, parsed)
remove_file(template)
print("= check replace big template")
start_time = time.time()
template = create_big_template()
sys.stderr.write( "big template created in : " + str(time.time() - start_time) + "s\n" )
template_out = template + ".replaced"
start_time = time.time()
coupling_tools.replace(infile=template, outfile=template_out,
tokens=["@E"], values=[2])
time_to_parse = str(int(time.time() - start_time))
check_outfile(template_out, semi_parsed)
remove_file(template_out)
remove_file(template)
sys.stderr.write( "parsed template in: " + time_to_parse + "s\n" )
# parsed template=3G -> 25s on bx (ssd, core i7@2.5GHz)
if int(time_to_parse) > max_time:
print('time to get token took too long (should be ' + str(max_time)+'s max)')
exit(1)
else:
print('check replace big template: ok')
def create_results(tokens, values=None, big=False):
filename = "results.out"
handle = open(filename, "wb")
if big:
print("create file of " + str(howbig) + "Mo")
for i in range(howbig):
for i in range(1024):
# line of 1024 octets
handle.write(b'u'*1024)
handle.write(b'\n')
handle.write(b'# ooo\n')
    if values is None:
handle.write(tokens.encode())
else:
n = 0
for t, v in zip(tokens, values):
handle.write((t + str(v)).encode())
# go to next line sometimes
if n%3 == 0:
handle.write(b'\n')
n += 1
handle.close()
return filename
def check_results(ok_values, values):
if ok_values == values:
print("ok")
else:
print("Error: found: " + str(values) + " should be: " + str(ok_values))
exit(1)
def check_get_line_col():
print("=== " + sys._getframe().f_code.co_name)
content = """01 02 03 04 05 06 07 08 09
11 12 13 14 15 16 17 18 19
21 22 23 24 25 26 27 28 29
31 32 33 34 35 36 37 38 39
"""
result_file = create_results(content)
value = 1
result = coupling_tools.get_line_col(result_file)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 3
result = coupling_tools.get_line_col(result_file, skip_col=2)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 11
result = coupling_tools.get_line_col(result_file, 1)
if
|
value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 16
result = coupling_tools.get_line_col(result_file, 1, 5)
if value != result: raise Exception("! got " + str(result) + ' inst
|
ead of ' +
str(value))
value = 9
result = coupling_tools.get_line_col(result_file, skip_col=-1)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 17
result = coupling_tools.get_line_col(result_file, 1, -3)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 31
result = coupling_tools.get_line_col(result_file, -1)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 14
result = coupling_tools.get_line_col(result_file, -3, -6)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 3
result = coupling_tools.get_line_col(result_file, seek=6)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 23
result = coupling_tools.get_line_col(result_file, skip_line=1, skip_col=2, seek=30)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 31
result = coupling_tools.get_line_col(result_file, skip_line=-1, seek=-100)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
#coupling_tools.debug = True
value = 21
just_before_line_ret = 80
sys.stderr.write( 'char at pos ' + str(just_before_line_ret) + ':->' + \
content[just_before_line_ret] + '<-\n' )
result = coupling_tools.get_line_col(result_file, skip_line=-1,
seek=-just_before_line_ret)
if value != result: raise Exception("! got " + str(result) + ' instead of ' +
str(value))
value = 21
just_after_line_ret = just_before_line_ret + 1
sys.stderr.write( 'char at pos ' + str(just_after_line_ret) + ':->' + \
content[just_after_line_ret] + '<-\n')
|
FutureSharks/invokust
|
invokust/settings.py
|
Python
|
mit
| 3,137
| 0.000319
|
# -*- coding: utf-8 -*-
import os
from locust.main import load_locustfile
def create_settings(
from_environment=False,
locustfile=None,
classes=None,
host=None,
num_users=None,
spawn_rate=None,
reset_stats=False,
run_time="3m",
loglevel="INFO",
):
"""
Returns a settings object to configure the locust load test.
Arguments
from_environment: get settings from environment variables
locustfile: locustfile to use for loadtest
classes: locust classes to use for load test
host: host for load testing
num_users: number of users to simulate in load test
spawn_rate: number of users per second to start
reset_stats: Whether to reset stats after all users are hatched
run_time: The length of time to run the test for. Cannot exceed the duration limit set by lambda
If
|
from_environment is set to True then this function will attempt to set
the attributes from environment variables. The environment variables are
named LOCUST_ + attribute name in upper case.
"""
settings
|
= type("", (), {})()
settings.from_environment = from_environment
settings.locustfile = locustfile
# parameters needed to create the locust Environment object
settings.classes = classes
settings.host = host
settings.tags = None
settings.exclude_tags = None
settings.reset_stats = reset_stats
settings.step_load = False
settings.stop_timeout = None
# parameters to configure test
settings.num_users = num_users
settings.run_time = run_time
settings.spawn_rate = spawn_rate
if from_environment:
for attribute in [
"locustfile",
"classes",
"host",
"run_time",
"num_users",
"spawn_rate",
"loglevel",
]:
var_name = "LOCUST_{0}".format(attribute.upper())
var_value = os.environ.get(var_name)
if var_value:
setattr(settings, attribute, var_value)
if settings.locustfile is None and settings.classes is None:
raise Exception("One of locustfile or classes must be specified")
if settings.locustfile and settings.classes:
raise Exception("Only one of locustfile or classes can be specified")
if settings.locustfile:
docstring, classes, shape_class = load_locustfile(settings.locustfile)
settings.classes = [classes[n] for n in classes]
else:
if isinstance(settings.classes, str):
settings.classes = settings.classes.split(",")
for idx, val in enumerate(settings.classes):
# This needs fixing
settings.classes[idx] = eval(val)
for attribute in ["classes", "host", "num_users", "spawn_rate"]:
val = getattr(settings, attribute, None)
if not val:
raise Exception(
"configuration error, attribute not set: {0}".format(attribute)
)
if isinstance(val, str) and val.isdigit():
setattr(settings, attribute, int(val))
return settings
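# Example usage (sketch; the locustfile path and host are placeholders):
#
#   settings = create_settings(
#       locustfile="locustfile.py",
#       host="http://example.com",
#       num_users=10,
#       spawn_rate=2,
#       run_time="1m",
#   )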
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatterternary/marker/line/_coloraxis.py
|
Python
|
mit
| 581
| 0
|
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(
self,
plotly_name="coloraxis",
parent_name="scatterternary.marker.line",
**kwargs
):
super(C
|
oloraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"
|
),
**kwargs
)
|
texta-tk/texta
|
account/urls.py
|
Python
|
gpl-3.0
| 1,734
| 0.006344
|
from django.conf.urls import url
from . import views
from django.contrib.auth.views import PasswordResetConfirmView, PasswordResetView, PasswordResetDoneView, PasswordResetCompleteView
urlpatterns = [
url(r'^$', views.index,name="home"),
url(r'update_dataset$', vie
|
ws.update_dataset, name='update_dataset'),
url(r'update_model$', views.update_model, name='update_model'),
url(r'^confirm/(?P<email_auth_token>([a-z]|[0-9]){14})/$', views.confirm_email, name='confirm_email'),
url(r'create$', views.create, name="create"),
url(r'login$', views.login, name="login"),
url(r'log_out$', views.log_out, name="log_out"),
url(r'change_pwd$', views.navigate_change_pwd, name="ch
|
ange_pwd"),
url(r'change_password$', views.change_password, name="change_password"),
url(r'get_auth_token$', views.get_auth_token, name="get_auth_token"),
url(r'revoke_auth_token$', views.revoke_auth_token, name="revoke_auth_token"),
url(r'password_reset$', PasswordResetView.as_view(template_name='password-templates/password-form.html',email_template_name='password-templates/password-email.html'), name='password_reset'),
url(r'password_reset_done/', PasswordResetDoneView.as_view(template_name='password-templates/password-reset-done.html'), name='password_reset_done'),
url(r'password_reset_confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
PasswordResetConfirmView.as_view(template_name="password-templates/password-reset-confirm.html"),
name='password_reset_confirm'),
url(r'password_reset_complete$', PasswordResetCompleteView.as_view(template_name='password-templates/password-reset-complete.html'), name='password_reset_complete'),
]
|
chromakey/django-salesforce
|
salesforce/backend/driver.py
|
Python
|
mit
| 6,090
| 0.001806
|
"""
Dummy Salesforce driver that simulates some parts of DB API 2
https://www.python.org/dev/peps/pep-0249/
should be independent on Django.db
and if possible should be independent on django.conf.settings
Code at lower level than DB API should be also here.
"""
from collections import namedtuple
import requests
import socket
from django.conf import settings
from django.utils.six import PY3
try:
import beatbox
except ImportError:
beatbox = None
import logging
log = logging.getLogger(__name__)
apilevel = "2.0"
# threadsafety = ...
# uses '%s' style parameters
paramstyle = 'format'
API_STUB = '/services/data/v35.0'
request_count = 0 # global c
|
ounter
# All error types described in DB API 2 are implemented the same way as in
#
|
Django 1.6, otherwise some exceptions are not correctly reported in it.
class Error(Exception if PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class SalesforceError(DatabaseError):
"""
DatabaseError that usually gets detailed error information from SF response
in the second parameter, decoded from REST, that frequently need not to be
displayed.
"""
def __init__(self, message='', data=None, response=None, verbose=False):
DatabaseError.__init__(self, message)
self.data = data
self.response = response
self.verbose = verbose
if verbose:
log.info("Error (debug details) %s\n%s", response.text,
response.__dict__)
class Connection(object):
    # close and commit can be safely ignored because everything is
    # committed automatically and REST is stateless.
def close(self):
pass
def commit(self):
pass
def rollback(self):
log.info("Rollback is not implemented.")
# DB API function
def connect(**params):
return Connection()
# LOW LEVEL
def getaddrinfo_wrapper(host, port, family=socket.AF_INET, socktype=0, proto=0, flags=0):
"""Patched 'getaddrinfo' with default family IPv4 (enabled by settings IPV4_ONLY=True)"""
return orig_getaddrinfo(host, port, family, socktype, proto, flags)
# patch to IPv4 if required and not patched by anything other yet
if getattr(settings, 'IPV4_ONLY', False) and socket.getaddrinfo.__module__ in ('socket', '_socket'):
log.info("Patched socket to IPv4 only")
orig_getaddrinfo = socket.getaddrinfo
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getaddrinfo_wrapper
# ----
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
#import pdb; pdb.set_trace()
#print("== REQUEST %s | %s | %s | %s" % (url, f, args, kwargs))
global request_count
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
    # TODO Remove this verbose setting after tuning of specific messages.
    # For now, the verbose output is more helpful than not.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
# if this Id can not be ever valid.
raise SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose
)
if(data['errorCode'] == 'INVALID_FIELD'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise SalesforceError('%s' % data, data, response, verbose)
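# Example usage (sketch): 'session' stands for a requests-Session-like object
# whose bound .get carries the auth handler used by the 401 retry path above;
# the instance URL and query are illustrative only.
#
#   url = instance_url + API_STUB + '/query/?q=SELECT+Id+FROM+Account'
#   response = handle_api_exceptions(url, session.get, _cursor=cursor)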
|
invisiblek/python-for-android
|
python3-alpha/python3-src/Lib/test/test_string.py
|
Python
|
apache-2.0
| 5,578
| 0.00251
|
import unittest, string
from test import support
class ModuleTest(unittest.TestCase):
def test_attrs(self):
string.whitespace
string.ascii_lowercase
string.ascii_uppercase
string.ascii_letters
string.digits
string.hexdigits
string.octdigits
string.punctuation
string.printable
def test_capwords(self):
self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\t def \nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
self.assertEqual(string.capwords('ABC-def DEF-ghi GHI')
|
, 'Abc-def Def-ghi Ghi')
self.assertEqual(string.capwords(' aBc DeF '), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')
def test_formatter(self):
fmt = string.Formatter()
self.assertEqual(fmt.format("foo"), "foo")
self.ass
|
ertEqual(fmt.format("foo{0}", "bar"), "foobar")
self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")
self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
# override get_value ############################################
class NamespaceFormatter(string.Formatter):
def __init__(self, namespace={}):
string.Formatter.__init__(self)
self.namespace = namespace
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
# Check explicitly passed arguments first
return kwds[key]
except KeyError:
return self.namespace[key]
else:
string.Formatter.get_value(key, args, kwds)
fmt = NamespaceFormatter({'greeting':'hello'})
self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')
# override format_field #########################################
class CallFormatter(string.Formatter):
def format_field(self, value, format_spec):
return format(value(), format_spec)
fmt = CallFormatter()
self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')
# override convert_field ########################################
class XFormatter(string.Formatter):
def convert_field(self, value, conversion):
if conversion == 'x':
return None
return super(XFormatter, self).convert_field(value, conversion)
fmt = XFormatter()
self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")
# override parse ################################################
class BarFormatter(string.Formatter):
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
def parse(self, format_string):
for field in format_string.split('|'):
if field[0] == '+':
# it's markup
field_name, _, format_spec = field[1:].partition(':')
yield '', field_name, format_spec, None
else:
yield field, None, None, None
fmt = BarFormatter()
self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '* foo *')
# test all parameters used
class CheckAllUsedFormatter(string.Formatter):
def check_unused_args(self, used_args, args, kwargs):
# Track which arguments actually got used
unused_args = set(kwargs.keys())
unused_args.update(range(0, len(args)))
for arg in used_args:
unused_args.remove(arg)
if unused_args:
raise ValueError("unused arguments")
fmt = CheckAllUsedFormatter()
self.assertEqual(fmt.format("{0}", 10), "10")
self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)
def test_vformat_assert(self):
cls = string.Formatter()
kwargs = {
"i": 100
}
self.assertRaises(ValueError, cls._vformat,
cls.format, "{0}", kwargs, set(), -2)
def test_convert_field(self):
cls = string.Formatter()
self.assertEqual(cls.format("{0!s}", 'foo'), 'foo')
self.assertRaises(ValueError, cls.format, "{0!h}", 'foo')
def test_get_field(self):
cls = string.Formatter()
class MyClass:
name = 'lumberjack'
x = MyClass()
self.assertEqual(cls.format("{0.name}", x), 'lumberjack')
lookup = ["eggs", "and", "spam"]
self.assertEqual(cls.format("{0[2]}", lookup), 'spam')
def test_main():
support.run_unittest(ModuleTest)
if __name__ == "__main__":
test_main()
|
softak/webfaction_demo
|
vendor-local/lib/python/selenium/webdriver/remote/errorhandler.py
|
Python
|
bsd-3-clause
| 6,176
| 0.001619
|
# Copyright 2010 WebDriver committers
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF AN
|
Y KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitation
|
s under the License.
from selenium.common.exceptions import ElementNotSelectableException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidCookieDomainException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import InvalidSelectiorException
from selenium.common.exceptions import ImeNotAvailableException
from selenium.common.exceptions import ImeActivationFailedException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import UnableToSetCookieException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import ErrorInResponseException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
class ErrorCode(object):
"""
Error codes defined in the WebDriver wire protocol.
"""
# Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h
SUCCESS = 0
NO_SUCH_ELEMENT = 7
NO_SUCH_FRAME = 8
UNKNOWN_COMMAND = 9
STALE_ELEMENT_REFERENCE = 10
ELEMENT_NOT_VISIBLE = 11
INVALID_ELEMENT_STATE = 12
UNKNOWN_ERROR = 13
ELEMENT_IS_NOT_SELECTABLE = 15
JAVASCRIPT_ERROR = 17
XPATH_LOOKUP_ERROR = 19
TIMEOUT = 21
NO_SUCH_WINDOW = 23
INVALID_COOKIE_DOMAIN = 24
UNABLE_TO_SET_COOKIE = 25
UNEXPECTED_ALERT_OPEN = 26
NO_ALERT_OPEN = 27
SCRIPT_TIMEOUT = 28
INVALID_ELEMENT_COORDINATES = 29
    IME_NOT_AVAILABLE = 30
IME_ENGINE_ACTIVATION_FAILED = 31
INVALID_SELECTOR = 32
MOVE_TARGET_OUT_OF_BOUNDS = 34
INVALID_XPATH_SELECTOR = 51
INVALID_XPATH_SELECTOR_RETURN_TYPER = 52
METHOD_NOT_ALLOWED = 405
class ErrorHandler(object):
"""
Handles errors returned by the WebDriver server.
"""
def check_response(self, response):
"""
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message.
"""
status = response['status']
if status == ErrorCode.SUCCESS:
return
exception_class = ErrorInResponseException
if status == ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status == ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status == ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status == ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status == ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status == ErrorCode.INVALID_ELEMENT_STATE:
exception_class = WebDriverException
elif status == ErrorCode.INVALID_SELECTOR \
or status == ErrorCode.INVALID_XPATH_SELECTOR \
or status == ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
exception_class = InvalidSelectiorException
elif status == ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status == ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = WebDriverException
elif status == ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = WebDriverException
elif status == ErrorCode.TIMEOUT:
exception_class = TimeoutException
elif status == ErrorCode.SCRIPT_TIMEOUT:
exception_class = TimeoutException
elif status == ErrorCode.UNKNOWN_ERROR:
exception_class = WebDriverException
elif status == ErrorCode.NO_ALERT_OPEN:
exception_class = NoAlertPresentException
elif status == ErrorCode.IME_NOT_AVAILABLE:
exception_class = ImeNotAvailableException
elif status == ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
            exception_class = ImeActivationFailedException
else:
exception_class = WebDriverException
value = response['value']
if type(value) is str:
if exception_class == ErrorInResponseException:
raise exception_class(response, value)
raise exception_class(value)
message = ''
if 'message' in value:
message = value['message']
screen = None
if 'screen' in value:
screen = value['screen']
stacktrace = None
if 'stackTrace' in value and value['stackTrace']:
zeroeth = ''
try:
zeroeth = value['stackTrace'][0]
except:
pass
if zeroeth.has_key('methodName'):
stacktrace = "Method %s threw an error in %s" % \
(zeroeth['methodName'],
self._value_or_default(zeroeth, 'fileName', '[No file name]'))
if exception_class == ErrorInResponseException:
raise exception_class(response, message)
raise exception_class(message, screen, stacktrace)
def _value_or_default(self, obj, key, default):
return obj[key] if obj.has_key(key) else default
|
typesupply/defconAppKit
|
Lib/defconAppKit/controls/openTypeControlsView.py
|
Python
|
mit
| 12,083
| 0.002731
|
from AppKit import NSScroller, NSColor, NSAttributedString, NSMenuItem, NSShadowAttributeName, NSShadow, \
NSForegroundColorAttributeName, NSFont, NSFontAttributeName, NSSmallControlSize, NSView
import vanilla
class OpenTypeControlsView(vanilla.ScrollView):
def __init__(self, posSize, callback):
self._callback = callback
# put the controls group into a flipped group.
# this will give better scroll behavior.
width = posSize[2] - NSScroller.scrollerWidth() - 2
view = DefconAppKitTopAnchoredNSView.alloc().init()
view.setFrame_(((0, 0), (width, 0)))
# call the super
super(OpenTypeControlsView, self).__init__(posSize, view, hasHorizontalScroller=False, drawsBackground=False)
# build the view for the controls
self._controlGroup = vanilla.Group((0, 0, width, 0))
view.addSubview_(self._controlGroup.getNSView())
# build the static controls
top = 10
# mode
self._controlGroup.modeTitle = vanilla.TextBox((10, top, -10, 14),
NSAttributedString.alloc().initWithString_attributes_("DISPLAY MODE", titleControlAttributes), sizeStyle="small")
top += 20
self._controlGroup.modeRadioGroup = vanilla.RadioGroup((10, top, -10, 38),
["Glyph Preview", "Glyph Records"], callback=self._controlEditCallback)
self._controlGroup.modeRadioGroup.set(0)
top += 48
self._controlGroup.line1 = vanilla.HorizontalLine((10, top, -10, 1))
top += 11
# case
self._controlGroup.caseTitle = vanilla.TextBox((10, top, -10, 14),
NSAttributedString.alloc().initWithString_attributes_("CASE CONVERSION", titleControlAttributes), sizeStyle="small")
top += 20
self._controlGroup.caseRadioGroup = vanilla.RadioGroup((10, top, -10, 58),
["Unchanged", "Uppercase", "Lowercase"], callback=self._controlEditCallback)
self._controlGroup.caseRadioGroup.set(0)
top += 68
# language, script and direction
self._controlGroup.scriptTitle = vanilla.TextBox((10, top, -10, 14),
NSAttributedString.alloc().initWithString_attributes_("SCRIPT & LANGUAGE", titleControlAttributes), sizeStyle="small")
top += 20
self._controlGroup.scriptPopUpButton = vanilla.PopUpButton((10, top, -10, 20), [], callback=self._controlEditCallback)
top += 25
self._controlGroup.languagePopUpButton = vanilla.PopUpButton((10, top, -10, 20), [], callback=self._controlEditCallback)
top += 35
self._controlGroup.directionTitle = vanilla.TextBox((10, top, -10, 14),
NSAttributedString.alloc().initWithString_attributes_("WRITING DIRECTION", titleControlAttributes), sizeStyle="small")
top += 20
self._controlGroup.directionRadioGroup = vanilla.RadioGroup((10, top, -10, 38),
["Left to Right", "Right to Left"], callback=self._controlEditCallback)
self._controlGroup.directionRadioGroup.set(0)
top += 48
# GSUB and GPOS
self._controlGroup.line2 = vanilla.HorizontalLine((10, top, -10, 1))
top += 11
# set document view height
(x, y), (w, h) = self._nsObject.documentView().frame()
self._nsObject.documentView().setFrame_(((x, y), (w, top)))
x, y, w, h = self._controlGroup.getPosSize()
self._controlGroup.setPosSize((x, y, w, top))
# storage
self._dynamicTop = top
self._gsubAttributes = {}
self._gposAttributes = {}
self._featureNames = {}
def _breakCycles(self):
self._callback = None
super(OpenTypeControlsView, self)._breakCycles()
def _controlEditCallback(self, sender):
self._callback(self)
def setFont(self, font):
# script list
if font is None:
scriptList = []
else:
scriptList = ["DFLT"] + font.getScriptList()
unsupportedScripts = [i for i in scriptTags if i not in scriptList]
if unsupportedScripts:
scriptList.append(NSMenuItem.separatorItem())
scriptList += unsupportedScripts
self._controlGroup.scriptPopUpButton.setItems(scriptList)
# language list
if
|
font is None:
languageList = []
else:
languageList = ["Default"] + font.getLanguageList()
unsupportedLanguages = [i for i in languageTags if i not in languageList]
if unsupportedLanguages:
languageList.append(NSMenuItem.separatorItem())
languageList += unsupportedLanguages
self._controlGroup.languagePopUpButton.setItems(languageList)
|
# teardown existing controls
for attr in self._gsubAttributes:
delattr(self._controlGroup, attr)
for attr in self._gposAttributes:
delattr(self._controlGroup, attr)
for attr in self._featureNames:
delattr(self._controlGroup, attr)
if hasattr(self._controlGroup, "gposTitle"):
del self._controlGroup.gposTitle
if hasattr(self._controlGroup, "gsubTitle"):
del self._controlGroup.gsubTitle
# stylistic set names
if hasattr(font, "stylisticSetNames"):
stylisticSetNames = font.stylisticSetNames
else:
stylisticSetNames = {}
# GSUB
top = self._dynamicTop
if font is None:
gsub = None
else:
gsub = font.gsub
if gsub is None:
gsubFeatureList = []
else:
gsubFeatureList = gsub.getFeatureList()
self._gsubAttributes = {}
self._featureNames = {}
if gsubFeatureList:
self._controlGroup.gsubTitle = vanilla.TextBox((10, top, -10, 14),
NSAttributedString.alloc().initWithString_attributes_("GSUB", titleControlAttributes), sizeStyle="small")
top += 20
for tag in gsubFeatureList:
state = font.gsub.getFeatureState(tag)
attr = "gsubCheckBox_%s" % tag
obj = vanilla.CheckBox((10, top, -10, 22), tag, value=state, callback=self._controlEditCallback)
setattr(self._controlGroup, attr, obj)
self._gsubAttributes[attr] = tag
top += 20
# stylistic set name
if tag in stylisticSetNames:
attr = "ssName_%s" % tag
setName = stylisticSetNames[tag]
if hasattr(self._controlGroup, attr):
obj = getattr(self._controlGroup, attr)
obj.set(setName)
else:
obj = vanilla.TextBox((26, top, -10, 13), setName, sizeStyle="mini")
setattr(self._controlGroup, attr, obj)
self._featureNames[attr] = setName
top += 13
top += 10
# GPOS
if font is None:
gpos = None
else:
gpos = font.gpos
if gpos is None:
gposFeatureList = []
else:
gposFeatureList = gpos.getFeatureList()
self._gposAttributes = {}
if gposFeatureList:
self._controlGroup.gposTitle = vanilla.TextBox((10, top, -10, 14),
NSAttributedString.alloc().initWithString_attributes_("GPOS", titleControlAttributes), sizeStyle="small")
top += 20
for tag in gposFeatureList:
state = font.gpos.getFeatureState(tag)
attr = "gposCheckBox_%s" % tag
obj = vanilla.CheckBox((10, top, -10, 22), tag, value=state, callback=self._controlEditCallback)
setattr(self._controlGroup, attr, obj)
self._gposAttributes[attr] = tag
top += 20
top += 10
# set the view size
(x, y), (w, h) = self._nsObject.documentView().frame()
self._nsObject.documentView().setFrame_(((x, y), (w, top)))
x, y, w, h = self._controlGroup.getPosSize()
self._controlGroup.setPosSize((x, y, w, top))
def get(self):
mode = ["preview",
|
paninetworks/neutron
|
neutron/tests/unit/plugins/ml2/test_extension_driver_api.py
|
Python
|
apache-2.0
| 10,552
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron import manager
from neutron.plugins.ml2 import config
from neutron.tests.unit.plugins.ml2.drivers import ext_test
from neutron.tests.unit.plugins.ml2 import test_plugin
class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
_extension_drivers = ['test']
def setUp(self):
config.cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(ExtensionDriverTestCase, self).setUp()
self._plugin = manager.NeutronManager.get_plugin()
self._ctxt = context.get_admin_context()
def test_network_attr(self):
with self.network() as network:
# Test create network
ent = network['network'].get('network_extension')
self.assertIsNotNone(ent)
# Test list networks
res = self._list('networks')
val = res['networks'][0].get('network_extension')
self.assertEqual('Test_Network_Extension_extend', val)
# Test network update
data = {'network':
{'network_extension': 'Test_Network_Extension_Update'}}
res = self._update('networks', network['network']['id'], data)
val = res['network'].get('network_extension')
self.assertEqual('Test_Network_Extension_Update_update', val)
def test_subnet_attr(self):
with self.subnet() as subnet:
# Test create subnet
ent = subnet['subnet'].get('subnet_extension')
self.assertIsNotNone(ent)
# Test list subnets
res = self._list('subnets')
val = res['subnets'][0].get('subnet_extension')
self.assertEqual('Test_Subnet_Extension_extend', val)
# Test subnet update
data = {'subnet':
{'subnet_extension': 'Test_Subnet_Extension_Update'}}
res = self._update('subnets', subnet['subnet']['id'], data)
val = res['subnet'].get('subnet_extension')
self.assertEqual('Test_Subnet_Extension_Update_update', val)
def test_port_attr(self):
with self.port() as port:
# Test create port
ent = port['port'].get('port_extension')
self.assertIsNotNone(ent)
# Test list ports
res = self._list('ports')
val = res['ports'][0].get('port_extension')
self.assertEqual('Test_Port_Extension_extend', val)
# Test port update
data = {'port': {'port_extension': 'Test_Port_Extension_Update'}}
res = self._update('ports', port['
|
port']['id'], data)
val = res['port'].get('port_extension')
self.assertEqual('Test_Port_Extension_Update_update', val)
def test_extend_network_dict(self):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_update_network') as ext_update_net,\
mock.patch.object(ext_test.TestExtensionDriver,
'extend_network_dict') as ext_net_dict,\
self.ne
|
twork() as network:
net_id = network['network']['id']
net_data = {'network': {'id': net_id}}
self._plugin.update_network(self._ctxt, net_id, net_data)
self.assertTrue(ext_update_net.called)
self.assertTrue(ext_net_dict.called)
def test_extend_subnet_dict(self):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_update_subnet') as ext_update_subnet,\
mock.patch.object(ext_test.TestExtensionDriver,
'extend_subnet_dict') as ext_subnet_dict,\
self.subnet() as subnet:
subnet_id = subnet['subnet']['id']
subnet_data = {'subnet': {'id': subnet_id}}
self._plugin.update_subnet(self._ctxt, subnet_id, subnet_data)
self.assertTrue(ext_update_subnet.called)
self.assertTrue(ext_subnet_dict.called)
def test_extend_port_dict(self):
with mock.patch.object(ext_test.TestExtensionDriver,
'process_update_port') as ext_update_port,\
mock.patch.object(ext_test.TestExtensionDriver,
'extend_port_dict') as ext_port_dict,\
self.port() as port:
port_id = port['port']['id']
port_data = {'port': {'id': port_id}}
self._plugin.update_port(self._ctxt, port_id, port_data)
self.assertTrue(ext_update_port.called)
self.assertTrue(ext_port_dict.called)
class DBExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase):
_extension_drivers = ['testdb']
def setUp(self):
config.cfg.CONF.set_override('extension_drivers',
self._extension_drivers,
group='ml2')
super(DBExtensionDriverTestCase, self).setUp()
self._plugin = manager.NeutronManager.get_plugin()
self._ctxt = context.get_admin_context()
def test_network_attr(self):
with self.network() as network:
# Test create with default value.
net_id = network['network']['id']
val = network['network']['network_extension']
self.assertEqual("", val)
res = self._show('networks', net_id)
val = res['network']['network_extension']
self.assertEqual("", val)
# Test list.
res = self._list('networks')
val = res['networks'][0]['network_extension']
self.assertEqual("", val)
            # Test create with explicit value.
res = self._create_network(self.fmt,
'test-network', True,
arg_list=('network_extension', ),
network_extension="abc")
network = self.deserialize(self.fmt, res)
net_id = network['network']['id']
val = network['network']['network_extension']
self.assertEqual("abc", val)
res = self._show('networks', net_id)
val = res['network']['network_extension']
self.assertEqual("abc", val)
# Test update.
data = {'network': {'network_extension': "def"}}
res = self._update('networks', net_id, data)
val = res['network']['network_extension']
self.assertEqual("def", val)
res = self._show('networks', net_id)
val = res['network']['network_extension']
self.assertEqual("def", val)
def test_subnet_attr(self):
with self.subnet() as subnet:
# Test create with default value.
net_id = subnet['subnet']['id']
val = subnet['subnet']['subnet_extension']
self.assertEqual("", val)
res = self._show('subnets', net_id)
val = res['subnet']['subnet_extension']
self.assertEqual("", val)
# Test list.
res = self._list('subnets')
val = res['subnets'][0]['subnet_extension']
self.assertEqual("", val)
with self.network() as network:
            # Test create with explicit value.
data = {'subnet':
{'network_id': network['network']['id'],
'cidr': '10.1.0.0/24',
'ip_version': '4',
'tenant_id': self._tenant_id,
'subnet_extension'
|
vivek8943/twitter-streamer
|
streamer/scripts/lps.py
|
Python
|
mit
| 351
| 0
|
#!/usr/bin/python
"""Print stats about
|
stdin per-line timings."""
import
|
signal
import sys
import time
start = time.time()
count = 0
try:
for line in sys.stdin:
count += 1
except KeyboardInterrupt:
    print
end = time.time()
et = end - start
lps = count / et
print "Elapsed time = %f, lines = %d, lps = %f" % (et, count, lps)
|
swtp1v07/Savu
|
savu/plugins/manchester_recon.py
|
Python
|
apache-2.0
| 7,508
| 0.000666
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: manchester_recon
:platform: Unix
:synopsis: An implementation of the Manchester code
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
from savu.data.process_data import CitationInfomration
from savu.data.structures import ProjectionData, VolumeData
from savu.plugins.gpu_filter import GpuFilter
import subprocess
import os
import logging
from xml.dom.minidom import parse
class ManchesterRecon(GpuFilter):
"""
    A Plugin to apply a simple reconstruction with no dependencies
"""
def __init__(self):
super(ManchesterRecon, self).__init__("SimpleRecon")
def populate_default_parameters(self):
self.parameters['center_of_rotation'] = 83
def filter_frame(self, data):
"""
Process the frame with the manchester code
:param data: The data to filter
:type data: ndarray
:returns: The filtered image
"""
centre_of_rotation = self.parameters['center_of_rotation']
# Save out Sinograms
        # Call processing
# load in reconstructions
return None
def required_data_type(self):
"""
The input for this plugin is ProjectionData
:returns: ProjectionData
"""
return ProjectionData
def output_data_type(self):
"""
The output of this plugin is VolumeData
:returns: VolumeData
"""
return VolumeData
def get_citation_inforamtion(self):
cite_info = CitationInfomration()
cite_info.description = \
("TO POPULATE")
cite_info.bibtex = \
("TO POPULATE")
cite_info.endnote = \
("TO POPULATE")
cite_info.doi = "TO POPULATE"
return cite_info
def _set_gpu_device_number(self, input_xml_dom, gpu_number):
element = input_xml_dom.getElementsByTagName('GPUDeviceNumber')[0]
element.childNodes[0].nodeValue = gpu_number
def _set_sino_filename(self, input_xml_dom, folder, prefix, extension,
number_of_digits, first_file, last_file,
file_step):
element = input_xml_dom.getElementsByTagName("InputData")[0]
element.getElementsByTagName("Folder")[0].childNodes[0].nodeValue =\
folder
element.getElementsByTagName("Prefix")[0].childNodes[0].nodeValue =\
prefix
element.getElementsByTagName("Extension")[0].childNodes[0].nodeValue =\
extension
element.getElementsByTagName("NOD")[0].childNodes[0].nodeValue =\
number_of_digits
element.getElementsByTagName("FileFirst")[0].childNodes[0].nodeValue =\
first_file
element.getElementsByTagName("FileLast")[0].childNodes[0].nodeValue =\
last_file
element.getElementsByTagName("FileStep")[0].childNodes[0].nodeValue =\
file_step
def _set_recon_filename(self, input_xml_dom, folder, prefix, extension,
number_of_digits):
element = input_xml_dom.getElementsByTagName("OutputData")[0]
element.getElementsByTagName("Folder")[0].childNodes[0].nodeValue =\
folder
element.getElementsByTagName("Prefix")[0].childNodes[0].nodeValue =\
prefix
element.getElementsByTagName("Extension")[0].childNodes[0].nodeValue =\
extension
element.getElementsByTagName("NOD")[0].childNodes[0].nodeValue =\
number_of_digits
def _set_recon_centre(self, input_xml_dom, recon_centre):
element = input_xml_dom.getElementsByTagName("ImageCentre")[0]
element.childNodes[0].nodeValue = recon_centre
def _set_recon_range(self, input_xml_dom, recon_range):
element = input_xml_dom.getElementsByTagName("Transform")[0]
element.getElementsByTagName("RotationAngleType")[0].\
childNodes[0].nodeValue = 'Other'
element.getElementsByTagName("RotationAngle")[0].\
childNodes[0].nodeValue = recon_range
def _set_recon_rotation(self, input_xml_dom, recon_rotation):
element = input_xml_dom.getElementsByTagName("ROI")[0]
element.getElementsByTagName("Angle")[0].childNodes[0].nodeValue =\
recon_rotation
def _process_from_sinogram(self, sino_prefix, sino_number, centre,
output_directory,
template_xml_file="/home/ssg37927/I12/tomotest/chunk_002.xml",
gpu_number=0, recon_range=180.0,
recon_rotation=0.0):
self._process_multiple_from_sinogram(sino_prefix, sino_number,
sino_number, 1, centre,
output_directory,
template_xml_file, gpu_number,
recon_range, recon_rotation)
def _process_multiple_from_sinogram(self, sino_prefix, sino_start,
sino_end, sino_step, centre,
output_directory,
template_xml_file="/home/ssg37927/I12/tomotest/chunk_002.xml",
gpu_number=0, recon_range=180.0,
recon_rotation=0.0):
# construct the input file
dom = parse(template_xml_file)
self._set_gpu_device_number(dom, gpu_number)
tmpdir = os.getenv('TMPDIR')
logging.debug("tempdir = %s", tmpdir)
self._set_sino_filename(dom, tmpdir, sino_prefix, "tif", 5,
sino_start, sino_end, sino_step)
self._set_recon_range(dom, recon_range)
self._set_recon_rotation(dom, recon_rotation)
if centre < -9000:
print("No centre set, using default")
self._set_recon_filename(dom, output_directory, "recon_", "tif", 5)
else:
self._set_recon_filename(dom, output_directory,
"recon_%06i_" % (int(centre*100)), "tif",
5)
self._set_recon_centre(dom, centre)
xml_filename = tmpdir+"/input%05i
|
.xml" % sino_start
        fh = open(xml_filename, 'w')
        dom.writexml(fh)
        fh.close()
# actually call the program
log_location = '/dls/tmp/tomopy/dt64/%i${USER}test.out' % (sino_start)
|
command = \
(". /etc/profile.d/modules.sh;" +
" module load i12;" +
" export CUDA_CACHE_DISABLE=1;" +
" echo BEFORE;" +
" dt64n %s &> %s;" % (xml_filename, log_location) +
" echo AFTER")
logging.debug("COMMAND CALLED'" + command + "'")
subprocess.call([command], shell=True)
|
CWolfRU/freedoom
|
lumps/colormap/colormap.py
|
Python
|
bsd-3-clause
| 6,534
| 0.02112
|
#!/usr/bin/env python
# Copyright (C) 2001 Colin Phipps <cphipps@doomworld.com>
# Copyright (C) 2008, 2013 Simon Howard
# Parts copyright (C) 1999 by id Software (http://www.idsoftware.com/)
#
# SPDX-License-Identifier: GPL-2.0+
#
# Takes PLAYPAL as input (filename is the only parameter)
# Produces a light graduated COLORMAP on stdout
# O(n^2)
#
# This was originally a Perl script by Colin Phipps; it was converted
# to Python and now is a more generic tool for generating all kinds of
# COLORMAP effects.
#
import os
import sys
import struct
# Parameters affecting colormap generation:
# "Darkness" is this color, which is usually black, but can be
# overridden (RGB 0-255):
dark_color = (0, 0, 0)
# Color to tint the colormap (RGB 0.0-1.0):
tint_color = (255, 255, 255)
# Fractional balance between tint and normal color. 0 is no tint applied,
# 1.0 is full tint.
tint_frac = 0
# Fudge factor to adjust brightness when calculating 'tinted' version
# of colors. Larger values are brighter but may cause color clipping.
# A value of 0.33 is a straight-average of the RGB channels. Maximum
# sensible value is 1.0, though it can be overdriven for fancy
# brightness effects.
tint_bright = 0.5
def read_palette(filename):
"""Read palette from file and return a list of tuples containing
RGB values."""
f = open(filename, "rb")
colors = []
for i in range(256):
data = f.read(3)
color = struct.unpack("BBB", data)
colors.append(color)
return colors
# Return closest palette entry to the given RGB triple
def search_palette(palette, target):
"""Search the given palette and find the nearest matching
color to the given color, returning an index into the
palette of the color that best matches."""
best_diff = None
best_index = None
def square(x):
return x * x
for i in range(len(palette)):
color = palette[i]
diff = square(target[0] - color[0]) \
+ square(target[1] - color[1]) \
+ square(target[2] - color[2])
if best_index is None or diff < best_diff:
best_diff = diff
best_index = i
return best_index
def generate_colormap(colors, palette):
"""Given a list of colors, translate these into indexes into
the given palette, finding the nearest color where an exact
match cannot be found."""
result = []
for color in colors:
index = search_palette(palette, color)
result.append(index)
return result
def tint_colors(colors, tint, bright=0.5):
"""Given a list of colors, tint them a particular color."""
result = []
for c in colors:
# I've experimented with different methods of calculating
# intensity, but this seems to work the best. This is basically
# doing an average of the full channels, but a straight
# average causes the picture to get darker - eg. (0,0,255)
# maps to (87,87,87). So we have a controllable brightness
# factor that allows the brightness to be adjusted.
intensity = min((c[0] + c[1] + c[2]) * bright, 255) / 255.0
result.append((
tint[0] * intensity,
tint[1] * intensity,
tint[2] * intensity,
))
return result
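# Worked example (sketch): with bright=0.5, pure blue (0, 0, 255) gives
# intensity = min(255 * 0.5, 255) / 255 = 0.5, so a white tint (255, 255, 255)
# maps it to (127.5, 127.5, 127.5); a straight average would be darker.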
def blend_color
|
s(colors1, colors2, factor=0.5):
"""Blend the two given lists of colors, with 'factor' controlling
the mix between the two. factor=0 is exactly colors1, while
factor=1 is exactly colors2. Returns a list of blended colors."""
result = []
for index, c1 in enumerate(colors1):
c2 = colors
|
2[index]
result.append((
c2[0] * factor + c1[0] * (1 - factor),
c2[1] * factor + c1[1] * (1 - factor),
c2[2] * factor + c1[2] * (1 - factor),
))
return result
def invert_colors(colors):
"""Given a list of colors, translate them to inverted monochrome."""
result = []
for color in colors:
average = (color[0] + color[1] + color[2]) // 3
inverse = 255 - average
result.append((inverse, inverse, inverse))
return result
def solid_color_list(color):
"""Generate a 256-entry palette where all entries are the
same color."""
return [color] * 256
def output_colormap(colormap):
"""Output the given palette to stdout."""
for c in colormap:
x = struct.pack("B", c)
os.write(sys.stdout.fileno(), x)
def print_palette(colors):
for y in range(16):
for x in range(16):
color = colors[y * 16 + x]
print("#%02x%02x%02x" % color)
print()
def parse_color_code(s):
"""Parse a color code in HTML color code format, into an RGB
3-tuple value."""
if not s.startswith('#') or len(s) != 7:
raise Exception('Not in HTML color code form: %s' % s)
return (int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16))
def set_parameter(name, value):
"""Set configuration value, from command line parameters."""
global dark_color, tint_color, tint_frac, tint_bright
if name == 'dark_color':
dark_color = parse_color_code(value)
elif name == 'tint_color':
tint_color = parse_color_code(value)
elif name == 'tint_pct':
tint_frac = int(value) / 100.0
elif name == 'tint_bright':
tint_bright = float(value)
else:
raise Exception("Unknown parameter: '%s'" % name)
# Parse command line.
playpal_filename = None
for arg in sys.argv[1:]:
if arg.startswith('--') and '=' in arg:
key, val = arg[2:].split('=', 2)
set_parameter(key, val)
else:
playpal_filename = arg
if playpal_filename is None:
print("Usage: %s playpal.lmp > output-file.lmp" % sys.argv[0])
sys.exit(1)
palette = read_palette(playpal_filename)
colors = palette
# Apply tint, if enabled.
# The tint is intentionally applied *before* the darkening effect is
# applied. This allows us to darken to a different color than the tint
# color, if so desired.
if tint_frac > 0:
colors = blend_colors(palette,
tint_colors(colors, tint_color, tint_bright),
tint_frac)
# Generate colormaps for different darkness levels, by blending between
# the default colors and a palette where every entry is the "dark" color.
dark = solid_color_list(dark_color)
for i in range(32):
darken_factor = (32 - i) / 32.0
darkened_colors = blend_colors(dark, colors, darken_factor)
output_colormap(generate_colormap(darkened_colors, palette))
# Inverse color map for invulnerability effect.
inverse_colors = invert_colors(palette)
output_colormap(generate_colormap(inverse_colors, palette))
# Last colormap is all black, and is actually unused in Vanilla Doom
# (it was mistakenly included by the dcolors.c utility). It's
# strictly unneeded, though some utilities (SLADE) do not detect a
# lump as a COLORMAP unless it is the right length.
output_colormap(generate_colormap(dark, palette))
|
altendky/canmatrix
|
examples/encodeFrame.py
|
Python
|
bsd-2-clause
| 2,030
| 0.003941
|
#!/usr/bin/env python3
import canmatrix.formats
import sys
import optparse
# command line options...
usage = """
%prog [options] matrix
matrix can be any of *.dbc|*.dbf|*.kcd|*.arxml
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"-f", "--frames",
dest="frames",
help="encode list of frames",
default="*")
(cmdlineOptions, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(1)
# load matrix
db = canmatrix.formats.loadp_flat(args[0])
#get all frames which match the commandline
frames = db.glob_frames(cmdlineOptions.frames)
#helper to read physical value from user
def read_signal_value_from_user(signal):
a = input("Enter Value for " + signal.name + " ")
if signal.is_float:
return float(a)
else:
return int(a)
# go through all frames
for frame in frames:
print (frame.name)
if frame.is_complex_multiplexed:
# ignore complex multiplexed signals
continue
if frame.is_multiplexed:
# if multiplexed frame search for multiplexer
multiplexer_signal = frame.get_multiplexer
# read multiplexer v
|
alue
a = input("Enter Value for Multiplexer " + multiplexer_signal.name + " ")
signalDict = dict()
signalDict[multiplex
|
er_signal.name] = int(a)
# read signals for the given multiplexer value
for signal in frame.get_signals_for_multiplexer_value(int(a)):
signalDict[signal.name] = read_signal_value_from_user(signal)
else:
# not multiplexed frame
signalDict = dict()
# go through all signals
for signal in frame.signals:
signalDict[signal.name] = read_signal_value_from_user(signal)
frame_data = frame.encode(signalDict)
if frame.arbitration_id.extended:
print("{:05X}#".format(frame.arbitration_id.id) + "".join(["%02X" % i for i in frame_data]))
else:
print("{:03X}#".format(frame.arbitration_id.id) + "".join(["%02X" % i for i in frame_data]))
|
domain51/d51.django.apps.sharing
|
d51/django/apps/sharing/views.py
|
Python
|
gpl-3.0
| 1,793
| 0.007808
|
from d51.django.auth.decorators import auth_required
from django.contrib.sites.models import Site
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ImproperlyConfigured
from .services import load_service, SharingServiceInvalidForm
from .models import URL
SHARE_KEY='u'
@auth_required()
def share_url(request, service_name):
# TODO: this view needs testing
response = HttpResponseRedirect(request.GET.get('next', '/'))
url_to_share = request.GET.get(SHARE_KEY, None)
if url_to_share is None:
# TODO change to a 400
raise Http404
else:
full_url_to_share = 'http://%s%s' % ((Site.objects.get_current().domain, url_to_share)) if url_to_share.find(':') == -1 else url_to_share
url, created = URL.objects.get_or_create(
url=full_url_to_share,
)
try:
url.send(service_name, request.user, request.POST)
    except SharingServiceInvalidForm:
service = load_service(service_name, url)
input = [] if request.method != 'POST' else [request.POST]
form = service.get_form_class()(*input)
templates, context = [
'sharing/%s/prompt.html'%service_name,
'sharing/prompt.html'
        ],{
            'service_name':service_name,
'form': form,
'url':url_to_share,
'SHARE_KEY':SHARE_KEY,
'next':request.GET.get('next','/')
}
response = render_to_response(templates, context, context_instance=RequestContext(request))
except ImproperlyConfigured:
raise Http404
return response
|
sthirugn/robottelo
|
robottelo/ui/products.py
|
Python
|
gpl-3.0
| 2,755
| 0
|
"""Implements Products UI"""
from robottelo.ui.base import Base
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class Products(Base):
"""Manipulates Products from UI"""
is_katello = True
def navigate_to_entity(self):
"""Navigate to Product entity page"""
Navigator(self.browser).go_to_products()
def _search_locator(self):
"""Specify locator for Product key entity search procedure"""
return locators['prd.select']
def create(self, name, description=None, sync_plan=None, startdate=None,
create_sync_plan=False, gpg_key=None, sync_interval=None):
"""Creates new product from UI"""
self.click(locators['prd.new'])
self.assign_value(common_locators['name'], name)
if sync_plan and not create_sync_plan:
self.select(locators['prd.sync_plan'], sync_plan)
elif sync_plan and create_sync_plan:
self.click(locators['prd.new_sync_plan'])
self.assign_value(common_locators['name'], name)
if sync_interval:
self.select(locators['prd.sync_interval'], sync_interval)
self.assign_value(locators['prd.sync_startdate'], startdate)
self.click(common_locators['create'])
if gpg_key:
self.select(common_locators['gpg_key'], gpg_key)
if description:
self.assign_value(common_locators['description'], description)
self.click(common_locators['create'])
def update(self, name, new_name=None, new_desc=None,
new_sync_plan=None, new_gpg_key=None):
"""Updates product from UI"""
self.search_and_click(name)
self.click(tab_locators['prd.tab_details'])
if new_name:
self.click(locators['prd.name_edit'])
self.assign_value(locators['prd.name_update'], new_name)
self.click(common_locators['save'])
if new_desc:
self.click(locators['prd.desc_edit'])
            self.assign_value(locators['prd.desc_update'], new_desc)
self.click(common_locators['save'])
if new_gpg_key:
self.click(locators['prd.gpg_key_edit'])
self.select(locators['prd.gpg_key_update'], new_gpg_key)
self.click(common_locators['save'])
if new_sync_plan:
            self.click(locators['prd.sync_plan_edit'])
            self.select(locators['prd.sync_plan_update'], new_sync_plan)
self.click(common_locators['save'])
def delete(self, name, really=True):
"""Delete a product from UI"""
self.delete_entity(
name,
really,
locators['prd.remove'],
)
|
bewestphal/SeleniumAI
|
example/configuration.py
|
Python
|
mit
| 6,202
| 0.003386
|
import os
import keras.backend as K
import numpy as np
from keras.layers import Dense, Activation, Flatten, Convolution2D, Permute
from keras.layers.convolutional import Conv2D
from keras.models import Sequential
from models import AbstractConfiguration, KickoffModes
from rl.memory import SequentialMemory
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy
from package.environment import SeleniumEnvironment
from package.processor import SeleniumObservationProcessor
from srcdir import srcdir
class ExampleConfiguration(AbstractConfiguration):
mode = KickoffModes.train # type: KickoffModes
use_preset_training = True
render = True # Default true when testing
number_test_episodes = 1000
window_width = 375
window_height = 1020
window_length = 4
number_of_steps = 10000
gamma = 0.99
target_model_update = 10000
train_interval = 4
delta_clip = 1.
learning_rate = .00025
metrics = ['mae']
processor = SeleniumObservationProcessor(window_height, window_width)
memory = SequentialMemory(limit=1000000, window_length=window_length)
warmup_steps = 5000
weights_filename = 'dqn_selenium_ai_weights.h5f'
checkpoint_interval_steps = 250000
checkpoint_weights_filename_base = 'dqn_selenium_ai_weights_{step}.h5f'
base_policy = LinearAnnealedPolicy(
EpsGreedyQPolicy(),
attr='eps',
value_max=1.,
value_min=.1,
value_test=.05,
nb_steps=number_of_steps)
def __init__(self):
self.environment = SeleniumEnvironment(self)
@property
def policy(self):
if self.use_preset_training:
self.base_policy.select_action = lambda **_: self.get_preset_training_step()
return self.base_policy
def create_cnn_model(self):
        input_shape = (self.window_length,) + (self.window_height, self.window_width)
model = Sequential()
        if K.image_dim_ordering() == 'tf':  # Tensorflow
            # (width, height, channels)
model.add(Permute((2, 3, 1), input_shape=input_shape))
elif K.image_dim_ordering() == 'th': # Theano
# (channels, width, height)
model.add(Permute((1, 2, 3), input_shape=input_shape))
else:
raise RuntimeError('Unknown image_dim_ordering.')
model.add(Conv2D(32, 8, 8, subsample=(4, 4)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 4, 4, subsample=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3, subsample=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(self.environment.action_space.number_of_actions))
model.add(Activation('linear'))
return model
def on_step_reset(self, driver):
# Scroll to random point on the page
page_height = driver.execute_script("return document.documentElement.scrollHeight")
np_random = np.random.RandomState()
# scroll_position = np_random.randint(low=0, high=page_height) - self.window_height
scroll_position = np_random.randint(low=0, high=self.window_height-250)
driver.execute_script("window.scrollBy(0,{scroll_position});".format(scroll_position=scroll_position))
def on_environment_creation(self):
# Create file within the docker container to use as the test web page
filepath = srcdir + '/example/test.html'
self.environment.selenium_docker_wrapper.upload_file_to_container(filepath)
self.starting_url = 'file://' + '/' + os.path.basename(filepath)
def get_preset_training_step(self):
if self.environment.driver.current_url == 'file:///gp/aw/c/ref=mw_dp_buy_crt':
return 0
targeted_element_xpath = '//input[contains(@id, "add-to-cart-button")]'
target_element = self.environment.driver.find_element_by_xpath(targeted_element_xpath)
target_element_top = target_element.location["y"]
target_element_bottom = target_element_top + target_element.size["height"]
scroll_amount = self.environment.driver.execute_script("return window.pageYOffset;")
window_size = self.environment.driver.get_window_size()
inner_height = self.environment.driver.execute_script("return window.innerHeight;")
if scroll_amount > target_element_top:
action = self.environment.action_space.mouse_scroll_up
elif scroll_amount + inner_height < target_element_bottom:
action = self.environment.action_space.mouse_scroll_down
else:
target_element_left = target_element.location["x"]
target_element_right = target_element_left + target_element.size["width"]
if self.environment.action_space.mouse_position_x > target_element_right:
action = self.environment.action_space.move_mouse_left
elif self.environment.action_space.mouse_position_x < target_element_left:
action = self.environment.action_space.move_mouse_right
elif self.environment.action_space.mouse_position_y - (window_size["height"] - inner_height) + scroll_amount \
> target_element_bottom:
action = self.environment.action_space.move_mouse_up
elif self.environment.action_space.mouse_position_y - (window_size["height"] - inner_height) + scroll_amount \
< target_element_top:
action = self.environment.action_space.move_mouse_down
else:
action = self.environment.action_space.mouse_press
return self.environment.action_space.available_actions.index(action)
def determine_reward(self, driver, action_index):
reward_indicator_url = 'file:///gp/aw/c/ref=mw_dp_buy_crt'
if driver.current_url == reward_indicator_url:
reward = 1
done = True
elif action_index == 0 and driver.current_url != reward_indicator_url:
reward = -1
done = True
else:
reward = 0
done = False
return done, reward
|
pombredanne/bokeh
|
tests/plugins/phantomjs_screenshot.py
|
Python
|
bsd-3-clause
| 889
| 0.004499
|
import json
import pytest
import subprocess
import sys
from os.path import join, dirname
from .utils import info, fail
def pytest_addoption(parser):
parser.addoption(
"--phantomjs", type=str, default="phantomjs", help="phantomjs executable"
)
def get_phantomjs_screenshot(url, screenshot_path, wait, width=1000, height=1000):
"""
wait is in milliseconds
"""
phantomjs = pytest.config.getoption('phantomjs')
cmd = [phantomjs, join(dirname(__file__), "phantomjs_screenshot.js"), url, screenshot_path, str(wait), str(width), str(height)]
info("Running command: %s" % " ".join(cmd))
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
except OSError:
fail("Failed to run: %s" % " ".join(cmd))
sys.exit(1)
return json.loads(proc.stdout.read().decode("utf-8"))
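# A hypothetical call site (the URL, output path, and wait value are
# illustrative, not taken from the test suite):
#
#   shot = get_phantomjs_screenshot("http://localhost:5006/plot",
#                                   "/tmp/plot.png", wait=500)
#   # 'shot' is the JSON object printed by the phantomjs helper script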
|
AdrianGaudebert/socorro-crashstats
|
vendor-local/lib/python/pyasn1/codec/ber/encoder.py
|
Python
|
mpl-2.0
| 12,392
| 0.004438
|
# BER encoder
from pyasn1.type import base, tag, univ, char, useful
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import int2oct, ints2octs, null, str2octs
from pyasn1 import error
class Error(Exception): pass
class AbstractItemEncoder:
supportIndefLenMode = 1
def encodeTag(self, t, isConstructed):
tagClass, tagFormat, tagId = t.asTuple() # this is a hotspot
v = tagClass | tagFormat
if isConstructed:
v = v|tag.tagFormatConstructed
if tagId < 31:
return int2oct(v|tagId)
else:
s = int2oct(tagId&0x7f)
tagId = tagId >> 7
while tagId:
s = int2oct(0x80|(tagId&0x7f)) + s
tagId = tagId >> 7
return int2oct(v|0x1F) + s
def encodeLength(self, length, defMode):
if not defMode and self.supportIndefLenMode:
return int2oct(0x80)
if length < 0x80:
            return int2oct(length)
        else:
substrate = null
while length:
substrate = int2oct(length&0xff) + substrate
length = length >> 8
            substrateLen = len(substrate)
            if substrateLen > 126:
raise Error('Length octets overflow (%d)' % substrateLen)
return int2oct(0x80 | substrateLen) + substrate
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
raise Error('Not implemented')
def _encodeEndOfOctets(self, encodeFun, defMode):
if defMode or not self.supportIndefLenMode:
return null
else:
return encodeFun(eoo.endOfOctets, defMode)
def encode(self, encodeFun, value, defMode, maxChunkSize):
substrate, isConstructed = self.encodeValue(
encodeFun, value, defMode, maxChunkSize
)
tagSet = value.getTagSet()
if tagSet:
if not isConstructed: # primitive form implies definite mode
defMode = 1
return self.encodeTag(
tagSet[-1], isConstructed
) + self.encodeLength(
len(substrate), defMode
) + substrate + self._encodeEndOfOctets(encodeFun, defMode)
else:
return substrate # untagged value
class EndOfOctetsEncoder(AbstractItemEncoder):
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return null, 0
class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if isinstance(value, base.AbstractConstructedAsn1Item):
value = value.clone(tagSet=value.getTagSet()[:-1],
cloneValueFlag=1)
else:
value = value.clone(tagSet=value.getTagSet()[:-1])
return encodeFun(value, defMode, maxChunkSize), 1
explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
class IntegerEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
octets = []
value = int(value) # to save on ops on asn1 type
while 1:
octets.insert(0, value & 0xff)
if value == 0 or value == -1:
break
value = value >> 8
if value == 0 and octets[0] & 0x80:
octets.insert(0, 0)
while len(octets) > 1 and \
(octets[0] == 0 and octets[1] & 0x80 == 0 or \
octets[0] == 0xff and octets[1] & 0x80 != 0):
del octets[0]
return ints2octs(octets), 0
class BitStringEncoder(AbstractItemEncoder):
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if not maxChunkSize or len(value) <= maxChunkSize*8:
r = {}; l = len(value); p = 0; j = 7
while p < l:
i, j = divmod(p, 8)
r[i] = r.get(i,0) | value[p]<<(7-j)
p = p + 1
keys = list(r); keys.sort()
return int2oct(7-j) + ints2octs([r[k] for k in keys]), 0
else:
pos = 0; substrate = null
while 1:
# count in octets
v = value.clone(value[pos*8:pos*8+maxChunkSize*8])
if not v:
break
substrate = substrate + encodeFun(v, defMode, maxChunkSize)
pos = pos + maxChunkSize
return substrate, 1
class OctetStringEncoder(AbstractItemEncoder):
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if not maxChunkSize or len(value) <= maxChunkSize:
return value.asOctets(), 0
else:
pos = 0; substrate = null
while 1:
v = value.clone(value[pos:pos+maxChunkSize])
if not v:
break
substrate = substrate + encodeFun(v, defMode, maxChunkSize)
pos = pos + maxChunkSize
return substrate, 1
class NullEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return null, 0
class ObjectIdentifierEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
precomputedValues = {
(1, 3, 6, 1, 2): (43, 6, 1, 2),
(1, 3, 6, 1, 4): (43, 6, 1, 4)
}
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
oid = value.asTuple()
if oid[:5] in self.precomputedValues:
octets = self.precomputedValues[oid[:5]]
index = 5
else:
if len(oid) < 2:
raise error.PyAsn1Error('Short OID %s' % value)
            # Build the first two sub-IDs
index = 0
subid = oid[index] * 40
subid = subid + oid[index+1]
if subid < 0 or subid > 0xff:
raise error.PyAsn1Error(
'Initial sub-ID overflow %s in OID %s' % (oid[index:], value)
)
octets = (subid,)
index = index + 2
# Cycle through subids
for subid in oid[index:]:
if subid > -1 and subid < 128:
# Optimize for the common case
octets = octets + (subid & 0x7f,)
elif subid < 0 or subid > 0xFFFFFFFF:
raise error.PyAsn1Error(
'SubId overflow %s in %s' % (subid, value)
)
else:
# Pack large Sub-Object IDs
res = (subid & 0x7f,)
subid = subid >> 7
while subid > 0:
res = (0x80 | (subid & 0x7f),) + res
subid = subid >> 7
# Add packed Sub-Object ID to resulted Object ID
octets += res
return ints2octs(octets), 0
class RealEncoder(AbstractItemEncoder):
def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if value.isPlusInfinity():
return int2oct(0x40), 0
if value.isMinusInfinity():
return int2oct(0x41), 0
m, b, e = value
if not m:
return null, 0
if b == 10:
return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), 0
elif b == 2:
            fo = 0x80 # binary encoding
if m < 0:
fo = fo | 0x40 # sign bit
m = -m
while int(m) != m: # drop floating point
m *= 2
e -= 1
while m & 0x1 == 0: # mantissa normalization
m >>= 1
e += 1
eo = null
while e:
eo = int2oct(e&0xff) + eo
e >>= 8
n = len(eo)
if n > 0xff:
raise error.PyAsn1Error('Real exponent overflow')
if n == 1:
pass
elif n == 2:
fo |= 1
elif n == 3:
fo |= 2
else:
fo |= 3
eo = int2oct(n//0xff+1) + eo
po = null
while m:
|
GetSomeBlocks/Score_Soccer
|
resources/lib/tvdb_api/tvdb_api.py
|
Python
|
mit
| 29,127
| 0.006111
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
"""Simple-to-use Python interface to The TVDB's API (www.thetvdb.com)
Example usage:
>>> from tvdb_api import Tvdb
>>> t = Tvdb()
>>> t['Lost'][4][11]['episodename']
u'Cabin Fever'
"""
__author__ = "dbr/Ben"
__version__ = "1.5"
import os
import sys
import urllib
import urllib2
import StringIO
import tempfile
import warnings
import logging
import datetime
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
import elementtree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from cache import CacheHandler
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
lastTimeout = None
def log():
return logging.getLogger("tvdb_api")
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
pass
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Episode number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term = None, key = None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term = term, key = key)
if len(searchresult) != 0:
results.extend(searchresult)
#end for cur_season
return results
class Season(dict):
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term = None, key = None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term = term, key = key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term = None, key = None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
This primarily for use use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
if term == None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find( unicode(term).lower() ) > -1:
return self
#end if cur_value.find()
#end for cur_key, cur_value
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive = False,
select_first = False,
debug = False,
cache = True,
banners = False,
                 actors = False,
                 custom_ui = None,
language = None,
search_all_languages = False,
apikey = None,
                 forceConnect=False):
"""interactive (True/False):
When True, uses built-in console UI is used to select the correct show.
When False, the
|
lindseypack/NIM
|
code/ap/apInv.py
|
Python
|
mit
| 5,106
| 0.009009
|
#!usr/bin/env python
from pysnmp.entity.rfc3413.oneliner import cmdgen
import shlex
import subprocess
import re
import os
import sys
import smtplib
from devices.models import AP as AccessPoint
## This file is used to update the access point inventory data. Use the
## updateAccessPoints function to run the update. The function
## updateStatus will only check if the APs are up or down, and send an
## email report on APs that are currently down or that have recovered.
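## A minimal driver sketch (the OID strings, path, and controller IPs below
## are illustrative assumptions, not values from this project):
##
##   AP_OIDs = {"mac": "...", "ip": "...", "name": "...",
##              "model": "...", "serial": "..."}
##   controller_IPs = ["192.0.2.10", "192.0.2.11"]
##   updateAccessPoints("/srv/nim", AP_OIDs, controller_IPs)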
## Do an snmpwalk using cmdgen from PySNMP to get data about each AP.
## Takes a dictionary of OIDs and a list of controller IPs.
def snmpwalk(OIDs, controllers):
APs = dict()
cmdGen = cmdgen.CommandGenerator()
for key in OIDs:
for controller in controllers:
errorIndication, errorStatus, errorIndex, varBindTable = cmdGen.bulkCmd(
cmdgen.CommunityData('spa'),
cmdgen.UdpTransportTarget((controller, 161)),
0, 1000, OIDs[key]
)
for varBindTableRow in varBindTable:
for name, val in varBindTableRow:
## make a unique identifier for each AP
num = str(name)[len(OIDs["mac"]):].strip('.')
try:
if key not in APs[num]:
APs[num][key] = str(val.prettyPrint())
except:
APs[num] = {key: str(val.prettyPrint())}
return APs
## Add or update all access points using Django.
def updateAccessPoints(path, AP_OIDs, controller_IPs):
APs = snmpwalk(AP_OIDs, controller_IPs)
for AP in APs:
if APs[AP]["serial"] != "0":
## make a new AP object if necessary
try:
new_AP = AccessPoint(serialno=APs[AP]["serial"], ip=APs[AP]["ip"])
new_AP.save()
except:
pass
## Update the AP's data
update_AP = AccessPoint.objects.get(serialno=APs[AP]["serial"], autoupdate=1)
update_AP.ip = APs[AP]["ip"]
update_AP.mac = APs[AP]["mac"].lower()[2:]
update_AP.name = APs[AP]["name"]
update_AP.model = APs[AP]["model"]
update_AP.save()
## Get the names of all the access points which are currently up and connected to
## a controller. Compare to the names in the database to find the APs that are down.
def updateStatus(controller_IPs, status_oid, email):
AP_command = []
for controller in controller_IPs:
AP_command.append("snmpwalk -v2c -c spa " + controller + " " + status_oid)
# Get the names of the APs connected to each controller.
# Compare to APs stored in the database to determine which are down and
# which have recovered.
upAPs = []
for cmd in AP_command:
upAPs.extend(runCommand(cmd))
storedAPs = AccessPoint.objects.all()
downAPs = []
recoveredAPs = []
for ap in storedAPs:
if ap.name not in upAPs:
ap.laststatus = "down"
if ap.checkstatus == True:
downAPs.append(ap)
else:
if ap.laststatus == "down" and ap.checkstatus == True:
recoveredAPs.append(ap)
ap.laststatus = "up"
ap.save()
# Send emails about down or recovered access points.
if len(downAPs) > 0:
message = '\nThe following access points are not responding:\n'
subject = 'APs are not responding'
sendEmail(message, subject, downAPs, email)
if len(recoveredAPs) > 0:
message = '\nThe following access points were down but have recovered:\n'
subject = 'APs have recovered'
sendEmail(message, subject, recoveredAPs, email)
#takes a string "com" and runs the command, returning a list of AP names
def runCommand(com):
args = shlex.split(com) #separates com into indv. args
p = subprocess.Popen(args, stdout=subprocess.PIPE) #runs command, saves stdout
#communicate() returns a tuple (stdout,stderr) but we only want stdout
stdout = p.communicate()[0]
#clean the data
stdout = stdout.replace("SNMPv2-SMI::enterprises.","")
stdout = stdout.replace("Hex-STRING:","")
stdout = stdout.replace("STRING:","")
stdout = stdout.replace("IpAddress:","")
stdout = stdout.replace("\"", "")
stdout = stdout.replace(" ", "")
#split stdout into lines
    stdoutLines = stdout.split("\n")
    stdoutLines = stdoutLines[:-1] #removes last empty row
#parse stdout into list
names = []
for line in stdoutLines:
names.append(line.split("=")[1])
return names
## Send an email report on access point status.
def sendEmail(messageBody, subject, APs, email):
for ap in APs:
        messageBody += "\t" + ap.ip + "\t" + ap.name + "\n"
toHeaderBuild = []
for to in email["to"]:
toHeaderBuild.append("<" + to + ">")
msg = "From: <" + email["from"] + "> \nTo: " + ', '.join(toHeaderBuild) + " \nSubject: " + subject + " \n" + messageBody
s = smtplib.SMTP(email["server"])
s.sendmail(email["from"], email["to"], msg)
s.quit()
|
lukaszb/django-richtemplates
|
example_project/urls.py
|
Python
|
bsd-3-clause
| 705
| 0.002837
|
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = patterns('',
(r'^accounts/', include('registration.urls')),
url(r'^admin_tools/', include('admin_tools.urls')),
(r'^admin/', include(admin.site.urls)),
url(r'^users/(?P<username>\w+)/$',
view='examples.views.userprofile',
name='richtemplates_examples_userprofile'),
url(r'^users/(?P<username>\w+)/edit/$',
view='examples.views.userprofile_edit',
name='richtemplates_examples_userprofile_edit'),
# example app's urls
    (r'^', include('examples.urls')),
)
|
rosmo/ansible
|
lib/ansible/modules/storage/netapp/na_elementsw_backup.py
|
Python
|
gpl-3.0
| 9,175
| 0.003379
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
Element Software Backup Manager
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_backup
short_description: NetApp Element Software Create Backups
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Create backup
options:
src_hostname:
description:
- hostname for the backup source cluster
required: true
aliases:
- hostname
src_username:
description:
- username for the backup source cluster
required: true
aliases:
- username
- user
src_password:
description:
- password for the backup source cluster
required: true
aliases:
- password
- pass
src_volume_id:
description:
- ID of the backup source volume.
required: true
aliases:
- volume_id
dest_hostname:
description:
    - hostname for the backup destination cluster
- will be set equal to src_hostname if not specified
required: false
dest_username:
description:
- username for the backup destination cluster
- will be set equal to src_username if not specified
required: false
dest_password:
description:
- password for the backup destination cluster
- will be set equal to src_password if not specified
required: false
dest_volume_id:
description:
- ID of the backup destination volume
required: true
format:
description:
- Backup format to use
choices: ['native','uncompressed']
required: false
default: 'native'
script:
description:
- the backup script to be executed
required: false
script_parameters:
description:
- the backup script parameters
required: false
'''
EXAMPLES = """
na_elementsw_backup:
src_hostname: "{{ source_cluster_hostname }}"
src_username: "{{ source_cluster_username }}"
src_password: "{{ source_cluster_password }}"
src_volume_id: 1
dest_hostname: "{{ destination_cluster_hostname }}"
dest_username: "{{ destination_cluster_username }}"
dest_password: "{{ destination_cluster_password }}"
dest_volume_id: 3
format: native
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_elementsw_module import NaElementSWModule
import time
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
import solidfire.common
except Exception:
HAS_SF_SDK = False
class ElementSWBackup(object):
''' class to handle backup operations '''
def __init__(self):
"""
Setup Ansible parameters and SolidFire connection
"""
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
src_hostname=dict(aliases=['hostname'], required=True, type='str'),
src_username=dict(aliases=['username', 'user'], required=True, type='str'),
src_password=dict(aliases=['password', 'pass'], required=True, type='str', no_log=True),
src_volume_id=dict(aliases=['volume_id'], required=True, type='str'),
dest_hostname=dict(required=False, type='str'),
dest_username=dict(required=False, type='str'),
dest_password=dict(required=False, type='str', no_log=True),
            dest_volume_id=dict(required=True, type='str'),
format=dict(required=False, choices=['native', 'uncompressed'], default='native'),
script=dict(required=False, type='str'),
script_parameters=dict(required=False, type='dict')
))
        self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_together=[['script', 'script_parameters']],
supports_check_mode=True
)
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        # If destination cluster details are not specified, set the destination to be the same as the source
if self.module.params["dest_hostname"] is None:
self.module.params["dest_hostname"] = self.module.params["src_hostname"]
if self.module.params["dest_username"] is None:
self.module.params["dest_username"] = self.module.params["src_username"]
if self.module.params["dest_password"] is None:
self.module.params["dest_password"] = self.module.params["src_password"]
params = self.module.params
# establish a connection to both source and destination sf clusters
self.module.params["username"] = params["src_username"]
self.module.params["password"] = params["src_password"]
self.module.params["hostname"] = params["src_hostname"]
self.src_connection = netapp_utils.create_sf_connection(self.module)
self.module.params["username"] = params["dest_username"]
self.module.params["password"] = params["dest_password"]
self.module.params["hostname"] = params["dest_hostname"]
self.dest_connection = netapp_utils.create_sf_connection(self.module)
        self.elementsw_helper = NaElementSWModule(self.src_connection)
# add telemetry attributes
self.attributes = self.elementsw_helper.set_element_attributes(source='na_elementsw_backup')
def apply(self):
"""
Apply backup creation logic
"""
self.create_backup()
self.module.exit_json(changed=True)
def create_backup(self):
"""
Create backup
"""
# Start volume write on destination cluster
try:
write_obj = self.dest_connection.start_bulk_volume_write(volume_id=self.module.params["dest_volume_id"],
format=self.module.params["format"],
attributes=self.attributes)
write_key = write_obj.key
except solidfire.common.ApiServerError as err:
self.module.fail_json(msg="Error starting bulk write on destination cluster", exception=to_native(err))
# Set script parameters if not passed by user
# These parameters are equivalent to the options used when a backup is executed via the GUI
if self.module.params["script"] is None and self.module.params["script_parameters"] is None:
self.module.params["script"] = 'bv_internal.py'
self.module.params["script_parameters"] = {"write": {
"mvip": self.module.params["dest_hostname"],
"username": self.module.params["dest_username"],
"password": self.module.params["dest_password"],
"key": write_key,
"endpoint": "solidfire",
"format": self.module.params["format"]},
"range": {"lba": 0, "blocks": 244224}}
# Start volume read on source cluster
try:
read_obj = self.src_connection.start_bulk_volume_read(self.module.params["src_volume_id"],
self.module.params["format"],
script=self.module.params["script"],
script_parameters=self.module.params["script_parameters"],
attributes=self.attributes)
except solidfire.common.ApiServerError as err:
|
elhoyos/colombiatransparente
|
transparencia/models.py
|
Python
|
gpl-3.0
| 2,979
| 0.004028
|
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from sorl import thumbnail
class PerfilColumnista(models.Model):
user = models.OneToOneField(User, primary_key=True)
    bio = models.TextField(blank=True, null=True)
image = thumbnail.ImageField(upload_to='img/columnistas', blank=True, null=True)
def crear_perfil_columnista(sender, instance, created, **kwargs):
if created:
perfil = PerfilColumnista()
perfil.user = instance
perfil.save()
post_save.connect(crear_perfil_columnista, sender=User)
ESTANCADO = 0
EN_PROCESO = 1
INCUMPLIDA = 2
A_MEDIAS = 3
CUMPLIDA = 4
ESTATUS_OPCIONES = (
(ESTANCADO, "Estancado"),
(EN_PROCESO, "En Proceso"),
(INCUMPLIDA, "Incumplida"),
(A_MEDIAS, "A Medias"),
(CUMPLIDA, "Cumplida"),
)
class Promesa(models.Model):
titulo = models.CharField(max_length=200)
columnista = models.ForeignKey(User)
actualizado = models.DateTimeField(auto_now=True)
slug = models.SlugField()
estatus = models.IntegerField(choices=ESTATUS_OPCIONES)
descripcion = models.TextField()
arriba = models.IntegerField(default=0, editable=False)
abajo = models.IntegerField(default=0, editable=False)
compartido = models.IntegerField(default=0, editable=False)
def __unicode__(self):
return self.titulo
class Personaje(models.Model):
nombre = models.CharField(max_length=64)
columnista = models.ForeignKey(User)
actualizado = models.DateTimeField(auto_now=True)
slug = models.SlugField()
descripcion = models.TextField()
image = thumbnail.ImageField(upload_to='img/personajes')
def __unicode__(self):
return self.nombre
class Cargo(models.Model):
personaje = models.ForeignKey(Personaje)
titulo = models.CharField(max_length=64)
actual = models.BooleanField()
inicio = models.DateField()
terminacion = models.DateField(blank=True, null=True)
def __unicode__(self):
if self.terminacion:
return "%s, %s, %s - %s" % (self.personaje.nombre, self.titulo, self.inicio.year, self.terminacion.year)
else:
return "%s, %s, %s" % (self.personaje.nombre, self.titulo, self.inicio.year)
class Meta:
unique_together = (
('personaje', 'titulo', 'inicio',),
)
class PromesaCargo(models.Model):
promesa = models.ForeignKey(Promesa)
cargo = models.ForeignKey(Cargo)
class Etiqueta(models.Model):
columnista = models.ForeignKey(User)
actualizado = models.DateTimeField(auto_now=True)
texto = models.CharField(max_length=32, unique=True)
descripcion = models.TextField()
def __unicode__(self):
return self.texto
def save(self):
self.texto = self.texto.lower()
super(Etiqueta, self).save()
class PromesaEtiqueta(models.Model):
promesa = models.ForeignKey(Promesa)
etiqueta = models.ForeignKey(Etiqueta)
|
hfp/tensorflow-xsmm
|
tensorflow/python/kernel_tests/list_ops_test.py
|
Python
|
apache-2.0
| 51,102
| 0.009236
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops which manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np # pylint: disable=unused-import
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def _testPushPop(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
  def testPushPop(self, max_num_elements):
self._testPushPop(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testPushPopGPU(self, max_num_elements):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testPushPop(max_num_elements)
@test_util.run_deprecated_v1
def testPushInFullListFails(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[], max_num_elements=1)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Tried to push item into a full list"):
l = list_ops.tensor_list_push_back(l, 2.)
self.evaluate(l)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_deprecated_v1
def testPopFromEmptyTensorListFails(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to pop from an empty list"):
l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.evaluate(l)
def _testStack(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
if not context.executing_eagerly():
self.assertAllEqual(t.shape.as_list(), [None])
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testStack(self, max_num_elements):
self._testStack(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testStackGPU(self, max_num_elements):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testStack(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testStackWithUnknownElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
# Should raise an error when the element tensors do not all have the same
# shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"):
l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testStackWithPartiallyDefinedElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1.0], [2.0]])
# Should raise an error when the element tensors do not all have the same
# shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError, "unequal shapes"):
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_deprecated_v1
def testStackEmptyList(self, max_num_elements):
# Should be able to stack empty lists with fully defined element_shape.
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[1, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2))
# Should not be able to stack empty lists with partially defined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
# Should not be able to stack empty lists with undefined element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
t = list_ops.
|
varunagrawal/ClassNotes
|
notes/views.py
|
Python
|
mit
| 4,070
| 0.009337
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
import onenote
import bitbucket
# sample repo_uuid = 3d316876-a913-4b20-b183-d57e919f96dc
BASE_URL = "https://afternoon-waters-2404.herokuapp.com"
# DEV_URL = "https://classnotes-varunagrawal.c9.io"
# Create your views here.
def index(request):
bitbucket_login_url = "{0}/notes/atlas_signin".format(BASE_URL)
microsoft_login_url = "{0}/notes/ms_signin".format(BASE_URL)
repo_uuid = None
if "repo_uuid" in request.GET:
repo_uuid = request.GET["repo_uuid"]
# bitbucket_login = bitbucket.is_logged_in(repo_uuid)
# bitbucket.set_repo_uuid(repo_uuid)
# response = render(request, 'index.html?bitbucket_login={0}'.format(bitbucket_login))
need_ms_signin = onenote.get_auth_token(request) is None
need_atlassian_signin = not bitbucket.is_logged_in(repo_uuid)
context = {'bitbucket_login': bitbucket_login_url, 'need_atlassian_signin': need_atlassian_signin,
'microsoft_login': microsoft_login_url, 'need_ms_signin': need_ms_signin}
response = render(request, 'index.html', context)
response.set_cookie('repo_uuid', repo_uuid)
return response
def ms_sign_in(request):
print("Redirecting user")
redirect_to = "https://login.live.com/oauth20_authorize.srf?client_id={0}&scope=wl.basic%20office.onenote_create%20office.onenote&response_type=code&redirect_uri={1}".format(onenote.CLIENT_ID, onenote.REDIRECT_URI)
return HttpResponseRedirect(redirect_to)
def ms_signed_in(request):
if onenote.verify(request):
data = onenote.get_auth_token(request)
return HttpResponseRedirect("/notes/notebooks")
else:
return HttpResponse("Please provide permission to ClassNotes.\n {0}".format(request))
def atlas_sign_in(request):
print("Bitbucket Sign in")
return HttpResponseRedirect(bitbucket.get_auth_url())
def atlas_signed_in(request):
if bitbucket.verify(request):
print("getting token")
bitbucket.get_auth_token(request)
print("got token")
return HttpResponseRedirect("/notes?bitbucket_login=1")
else:
return HttpResponse("Please grant access to Bitbucket, {0}".format(request.GET))
def notebooks(request):
if 'repo_uuid' in request.COOKIES:
repo_uuid = request.COOKIES['repo_uuid']
books = onenote.get_notebooks(repo_uuid)
context = {'notebooks': books}
# print(notebooks
return render(request, 'notebooks.html', context)#("List of Notebooks\n{0}".format(str(notebooks)))
def sections(request):
if 'repo_uuid' in request.COOKIES:
repo_uuid = request.COOKIES['repo_uuid']
sections = onenote.get_sections(request.GET["id"], request.GET["name"], repo_uuid)
context = {'sections': sections}
# return HttpResponse(sections["value"])
return render(request, 'sections.html', context)
def pages(request):
if 'repo_uuid' in request.COOKIES:
repo_uuid = request.COOKIES['repo_uuid']
pages = onenote.get_pages(request.GET["id"], request.GET["name"], repo_uuid)
context = {'pages': pages}
# print(request.GET["name"]
# write the pages to the repository Wiki and stay on the page
if not bitbucket.is_logged_in(repo_uuid):
return HttpResponseRedirect("/notes")
# code to write to wiki
page_links_md = onenote.get_page_links_md(pages)
    bitbucket.add_to_wiki(page_links_md, repo_uuid)
# return HttpResponse(str(pages))
return render(request, 'pages.html', context)
# return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def page(request):
repo_uuid = None
if 'repo_uuid' in request.COOKIES:
repo_uuid = request.COOKIES['repo_uuid']
    page = onenote.get_page_content(request.GET["id"], repo_uuid)
# context = { 'page': page }
# return render(request, 'page.html', context)
return HttpResponse(str(page))
|
lucaskanashiro/debile
|
debile/slave/runners/pep8.py
|
Python
|
mit
| 1,811
| 0
|
# Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
# Copyright (c) 2013 Leo Cavaille <leo@cavaille.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from debile.slave.wrappers.pep8 import parse_pep8
from debile.slave.utils import cd
from debile.utils.commands import run_command
def pep8(dsc, analysis):
run_command(["dpkg-source", "-x", dsc, "source-pep8"])
with cd('source-pep8'):
out, _, ret = run_command(['pep8', '.'])
failed = ret != 0
for issue in parse_pep8(out.splitlines()):
analysis.results.append(issue)
return (analysis, out, failed, None, None)
def version():
    out, _, ret = run_command(['pep8', '--version'])
if ret != 0:
raise Exception("pep8 is not installed")
return ('pep8', out.strip())
|
wiki-ai/revscoring
|
revscoring/scoring/models/gradient_boosting.py
|
Python
|
mit
| 462
| 0
|
"""
A collection of Gradient Boosting type classifier models.
.. autoclass:: revscoring.scoring.models.GradientBoosting
:members:
:member-order:
"""
import logging
from sklearn.ensemble import GradientBoostingClassifier
from .sklearn import ProbabilityClassifier
logger = logging.getLogger(__name__)
class GradientBoosting(ProbabilityClassifier):
"""
    Implements a Gradient Boosting model.
"""
Estimator = GradientBoostingClassifier
|
jnayak1/cs3240-s16-team16
|
login/admin.py
|
Python
|
mit
| 331
| 0.003021
|
from django.contrib import admin
from login.models import Category, Page
from login.models import UserProfile
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug':('name',)}
admin.site.register(Category, CategoryAdmin)
admin.site.register(Page)
admin.site.register(UserProfile)
# Register your models here.
|
justusc/Elemental
|
python/optimization/util.py
|
Python
|
bsd-3-clause
| 18,829
| 0.038239
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from El.core import *
# Coherence
# =========
lib.ElCoherence_s.argtypes = \
lib.ElCoherence_c.argtypes = \
lib.ElCoherenceDist_s.argtypes = \
lib.ElCoherenceDist_c.argtypes = \
[c_void_p,POINTER(sType)]
lib.ElCoherence_d.argtypes = \
lib.ElCoherence_z.argtypes = \
lib.ElCoherenceDist_d.argtypes = \
lib.ElCoherenceDist_z.argtypes = \
[c_void_p,POINTER(dType)]
def Coherence(A):
value = TagToType(Base(A.tag))()
args = [A.obj,pointer(value)]
if type(A) is Matrix:
if A.tag == sTag: lib.ElCoherence_s(*args)
elif A.tag == dTag: lib.ElCoherence_d(*args)
elif A.tag == cTag: lib.ElCoherence_c(*args)
elif A.tag == zTag: lib.ElCoherence_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElCoherenceDist_s(*args)
elif A.tag == dTag: lib.ElCoherenceDist_d(*args)
elif A.tag == cTag: lib.ElCoherenceDist_c(*args)
elif A.tag == zTag: lib.ElCoherenceDist_z(*args)
else: DataExcept()
else: TypeExcept()
return value
# Covariance
# ==========
lib.ElCovariance_s.argtypes = \
lib.ElCovariance_d.argtypes = \
lib.ElCovariance_c.argtypes = \
lib.ElCovariance_z.argtypes = \
lib.ElCovarianceDist_s.argtypes = \
lib.ElCovarianceDist_d.argtypes = \
lib.ElCovarianceDist_c.argtypes = \
lib.ElCovarianceDist_z.argtypes = \
[c_void_p,c_void_p]
def Covariance(D):
if type(D) is Matrix:
S = Matrix(D.tag)
args = [D.obj,S.obj]
if D.tag == sTag: lib.ElCovariance_s(*args)
elif D.tag == dTag: lib.ElCovariance_d(*args)
elif D.tag == cTag: lib.ElCovariance_c(*args)
elif D.tag == zTag: lib.ElCovariance_z(*args)
else: DataExcept()
return S
elif type(D) is DistMatrix:
S = DistMatrix(D.tag,MC,MR,D.Grid())
args = [D.obj,S.obj]
if D.tag == sTag: lib.ElCovarianceDist_s(*args)
elif D.tag == dTag: lib.ElCovarianceDist_d(*args)
elif D.tag == cTag: lib.ElCovarianceDist_c(*args)
elif D.tag == zTag: lib.ElCovarianceDist_z(*args)
else: DataExcept()
return S
else: TypeExcept()
# Log barrier
# ===========
lib.ElLogBarrier_s.argtypes = \
lib.ElLogBarrier_c.argtypes = \
lib.ElLogBarrierDist_s.argtypes = \
lib.ElLogBarrierDist_c.argtypes = \
[c_uint,c_void_p,POINTER(sType)]
lib.ElLogBarrier_d.argtypes = \
lib.ElLogBarrier_z.argtypes = \
lib.ElLogBarrierDist_d.argtypes = \
lib.ElLogBarrierDist_z.argtypes = \
[c_uint,c_void_p,POINTER(dType)]
def LogBarrier(uplo,A):
barrier = TagToType(Base(A.tag))()
args = [uplo,A.obj,pointer(barrier)]
if type(A) is Matrix:
if A.tag == sTag: lib.ElLogBarrier_s(*args)
elif A.tag == dTag: lib.ElLogBarrier_d(*args)
elif A.tag == cTag: lib.ElLogBarrier_c(*args)
elif A.tag == zTag: lib.ElLogBarrier_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElLogBarrierDist_s(*args)
elif A.tag == dTag: lib.ElLogBarrierDist_d(*args)
elif A.tag == cTag: lib.ElLogBarrierDist_c(*args)
elif A.tag == zTag: lib.ElLogBarrierDist_z(*args)
else: DataExcept()
else: TypeExcept()
return barrier
# Log-det divergence
# ==================
lib.ElLogDetDiv_s.argtypes = \
lib.ElLogDetDiv_c.argtypes = \
lib.ElLogDetDivDist_s.argtypes = \
lib.ElLogDetDivDist_c.argtypes = \
[c_uint,c_void_p,c_void_p,POINTER(sType)]
lib.ElLogDetDiv_d.argtypes = \
lib.ElLogDetDiv_z.argtypes = \
lib.ElLogDetDivDist_d.argtypes = \
lib.ElLogDetDivDist_z.argtypes = \
[c_uint,c_void_p,c_void_p,POINTER(dType)]
def LogDetDiv(uplo,A,B):
div = TagToType(Base(A.tag))()
args = [uplo,A.obj,B.obj,pointer(div)]
if type(A) is Matrix:
if A.tag == sTag: lib.ElLogDetDiv_s(*args)
elif A.tag == dTag: lib.ElLogDetDiv_d(*args)
elif A.tag == cTag: lib.ElLogDetDiv_c(*args)
elif A.tag == zTag: lib.ElLogDetDiv_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElLogDetDivDist_s(*args)
elif A.tag == dTag: lib.ElLogDetDivDist_d(*args)
elif A.tag == cTag: lib.ElLogDetDivDist_c(*args)
elif A.tag == zTag: lib.ElLogDetDivDist_z(*args)
else: DataExcept()
else: TypeExcept()
return div
# SOC Identity
# ============
# TODO
# SOC dots
# ========
lib.ElSOCDots_s.argtypes = \
lib.ElSOCDots_d.argtypes = \
[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]
lib.ElSOCDotsDist_s.argtypes = \
lib.ElSOCDotsDist_d.argtypes = \
lib.ElSOCDotsDistMultiVec_s.argtypes = \
lib.ElSOCDotsDistMultiVec_d.argtypes = \
[c_void_p,c_void_p,c_void_p,c_void_p,c_void_p,c_int]
def SOCDots(x,y,orders,firstInds,cutoff=1000):
# TODO: Sanity checking
if type(x) is Matrix:
z = Matrix(x.tag)
args = [x.obj,y.obj,z.obj,orders.obj,firstInds.obj]
if x.tag == sTag: lib.ElSOCDots_s(*args)
elif x.tag == dTag: lib.ElSOCDots_d(*args)
else: DataExcept()
return z
elif type(x) is DistMatrix:
z = DistMatrix(x.tag,VC,STAR,x.Grid())
args = [x.obj,y.obj,z.obj,orders.obj,firstInds.obj,cutoff]
if x.tag == sTag: lib.ElSOCDotsDist_s(*args)
elif x.tag == dTag: lib.ElSOCDotsDist_d(*args)
else: DataExcept()
return z
elif type(x) is DistMultiVec:
z = DistMultiVec(x.tag,x.Comm())
args = [x.obj,y.obj,z.obj,orders.obj,firstInds.obj,cutoff]
if x.tag == sTag: lib.ElSOCDotsDistMultiVec_s(*args)
elif x.tag == dTag: lib.ElSOCDotsDistMultiVec_d(*args)
else: DataExcept()
return z
else: TypeExcept()
# SOC Broadcast
# =============
lib.ElSOCBroadcast_s.argtypes = \
lib.ElSOCBroadcast_d.argtypes = \
[c_void_p,c_void_p,c_void_p]
lib.ElSOCBroadcastDist_s.argtypes = \
lib.ElSOCBroadcastDist_d.argtypes = \
lib.ElSOCBroadcastDistMultiVec_s.argtypes = \
lib.ElSOCBroadcastDistMultiVec_d.argtypes = \
[c_void_p,c_void_p,c_void_p,c_int]
def SOCBroadcast(x,orders,firstInds,cutoff=1000):
# TODO: Sanity checking
if type(x) is Matrix:
args = [x.obj,orders.obj,firstInds.obj]
if x.tag == sTag: lib.ElSOCBroadcast_s(*args)
elif x.tag == dTag: lib.ElSOCBroadcast_d(*args)
else: DataExcept()
elif type(x) is DistMatrix:
args = [x.obj,orders.obj,firstInds.obj,cutoff]
if x.tag == sTag: lib.ElSOCBroadcastDist_s(*args)
elif x.tag == dTag: lib.ElSOCBroadcastDist_d(*args)
else: DataExcept()
elif type(x) is DistMultiVec:
args = [x.obj,orders.obj,firstInds.obj,cutoff]
if x.tag == sTag: lib.ElSOCBroadcastDistMultiVec_s(*args)
elif x.tag == dTag: lib.ElSOCBroadcastDistMultiVec_d(*args)
else: DataExcept()
else: TypeExcept()
# SOC Identity
# ============
lib.ElSOCIdentity_s.argtypes = \
lib.ElSOCIdentity_d.argtypes = \
lib.ElSOCIdentityDist_s.argtypes = \
lib.ElSOCIdentityDist_d.argtypes = \
lib.ElSOCIdentityDistMultiVec_s.argtypes = \
lib.ElSOCIdentityDistMultiVec_d.argtypes = \
[c_void_p,c_void_p,c_void_p]
def SOCIdentity(x,orders,firstInds):
# TODO: Sanity checking
args = [x.obj,orders.obj,firstInds.obj]
if type(x) is Matrix:
if x.tag == sTag: lib.ElSOCIdentity_s(*args)
elif x.tag == dTag: lib.ElSOCIdentity_d(*args)
else: DataExcept()
elif type(x) is DistMatrix:
if x.tag == sTag: lib.ElSOCIdentityDist_s(*args)
elif x.tag == dTag: lib.ElSOCIdentityDist_d(*args)
else: DataExcept()
elif type(x) is DistMultiVec:
if x.tag == sTag: lib.ElSOCIdentityDistMultiVec_s(*args)
elif x.tag == dTag: lib.ElSOCIdentityDistMultiVec_d(*args)
else: DataExcept()
else: TypeExcept()
# SOC Reflect
# ===========
lib.ElSOCReflect_s.argtypes = \
lib.ElSOCReflect_d.argtypes = \
lib.ElSOCReflectDist_s.argtypes = \
lib.ElSOCReflectDist_d.argtypes = \
lib.ElSOCReflectDistMultiVec_s.argtypes = \
lib.ElSOCReflectDistMultiVec_d.argtypes = \
[c_void_p,c_void_p,c_void_p]
def SOCReflect(x,orders,firstInds):
# TODO: Sanity checking
args = [x.obj,orders.obj,firstInds.obj]
if type(x) is Matrix:
if x.tag == sTag: lib.ElSOCReflect_s(*args)
elif x.tag == dTag: lib.ElSOCReflect_d(*args)
else: DataExcept()
elif type(x) is DistMatrix:
    if x.tag == sTag: lib.ElSOCReflectDist_s(*args)
    elif x.tag == dTag: lib.ElSOCReflectDist_d(*args)
    else: DataExcept()
  elif type(x) is DistMultiVec:
    if x.tag == sTag: lib.ElSOCReflectDistMultiVec_s(*args)
    elif x.tag == dTag: lib.ElSOCReflectDistMultiVec_d(*args)
    else: DataExcept()
  else: TypeExcept()
|
davidwilson-85/easymap
|
graphic_output/Pillow-4.2.1/Tests/test_image_tobytes.py
|
Python
|
gpl-3.0
| 247
| 0.004049
|
from helper import unittest, PillowTestCase, hopper
class TestImageToBytes(PillowTestCase):
def test_sanity(self):
data = hopper().tobytes()
self.assertIsInstance(data, bytes)
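# Editor's sketch (not part of the original test): tobytes() pairs with
# Image.frombytes(), which rebuilds an image of the same mode and size
# from the raw buffer.
def _roundtrip(im):
    from PIL import Image
    return Image.frombytes(im.mode, im.size, im.tobytes())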
if __name__ == '__main__':
    unittest.main()
|
clouserw/olympia
|
apps/amo/tests/test_helpers.py
|
Python
|
bsd-3-clause
| 15,825
| 0
|
# -*- coding: utf-8 -*-
import mimetypes
import os
from datetime import datetime, timedelta
from urlparse import urljoin
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import encoding
import jingo
import test_utils
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery
import amo
import amo.tests
from amo import urlresolvers, utils, helpers
from amo.utils import ImageCheck
from versions.models import License
def render(s, context={}):
t = jingo.env.from_string(s)
return t.render(context)
def test_strip_html():
eq_('Hey Brother!', render('{{ "Hey <b>Brother!</b>"|strip_html }}'))
def test_currencyfmt():
eq_(helpers.currencyfmt(None, 'USD'), '')
eq_(helpers.currencyfmt(5, 'USD'), '$5.00')
def test_strip_html_none():
eq_('', render('{{ a|strip_html }}', {'a': None}))
eq_('', render('{{ a|strip_html(True) }}', {'a': None}))
def test_strip_controls():
# We want control codes like \x0c to disappear.
eq_('I ove you', helpers.strip_controls('I \x0cove you'))
def test_finalize():
"""We want None to show up as ''. We do this in JINJA_CONFIG."""
eq_('', render('{{ x }}', {'x': None}))
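# Editor's illustrative sketch (assumption: this is not the project's actual
# JINJA_CONFIG): a finalize hook like the one referenced above is just a
# keyword argument on a Jinja2 environment.
import jinja2
_finalize_env = jinja2.Environment(finalize=lambda x: '' if x is None else x)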
def test_slugify_spaces():
"""We want slugify to preserve spaces, but not at either end."""
eq_(utils.slugify(' b ar '), 'b-ar')
eq_(utils.slugify(' b ar ', spaces=True), 'b ar')
    eq_(utils.slugify(' b  ar ', spaces=True), 'b  ar')
def test_page_title():
request = Mock()
request.APP = amo.THUNDERBIRD
title = 'Oh hai!'
s = render('{{ page_title("%s") }}' % title, {'request': request})
eq_(s, '%s :: Add-ons for Thunderbird' % title)
# pages without app should show a default
request.APP = None
s = render('{{ page_title("%s") }}' % title, {'request': request})
eq_(s, '%s :: Add-ons' % title)
# Check the dirty unicodes.
request.APP = amo.FIREFOX
s = render('{{ page_title(x) }}',
{'request': request,
'x': encoding.smart_str(u'\u05d0\u05d5\u05e1\u05e3')})
class TestBreadcrumbs(object):
def setUp(self):
self.req_noapp = Mock()
self.req_noapp.APP = None
self.req_app = Mock()
self.req_app.APP = amo.FIREFOX
def test_no_app(self):
s = render('{{ breadcrumbs() }}', {'request': self.req_noapp})
doc = PyQuery(s)
crumbs = doc('li>a')
eq_(len(crumbs), 1)
eq_(crumbs.text(), 'Add-ons')
eq_(crumbs.attr('href'), urlresolvers.reverse('home'))
def test_with_app(self):
s = render('{{ breadcrumbs() }}', {'request': self.req_app})
doc = PyQuery(s)
crumbs = doc('li>a')
eq_(len(crumbs), 1)
eq_(crumbs.text(), 'Add-ons for Firefox')
eq_(crumbs.attr('href'), urlresolvers.reverse('home'))
def test_no_add_default(self):
s = render('{{ breadcrumbs(add_default=False) }}',
{'request': self.req_app})
eq_(len(s), 0)
def test_items(self):
s = render("""{{ breadcrumbs([('/foo', 'foo'),
('/bar', 'bar')],
add_default=False) }}'""",
{'request': self.req_app})
doc = PyQuery(s)
crumbs = doc('li>a')
eq_(len(crumbs), 2)
eq_(crumbs.eq(0).text(), 'foo')
eq_(crumbs.eq(0).attr('href'), '/foo')
eq_(crumbs.eq(1).text(), 'bar')
eq_(crumbs.eq(1).attr('href'), '/bar')
def test_items_with_default(self):
s = render("""{{ breadcrumbs([('/foo', 'foo'),
('/bar', 'bar')]) }}'""",
{'request': self.req_app})
doc = PyQuery(s)
crumbs = doc('li>a')
eq_(len(crumbs), 3)
eq_(crumbs.eq(1).text(), 'foo')
eq_(crumbs.eq(1).attr('href'), '/foo')
eq_(crumbs.eq(2).text(), 'bar')
eq_(crumbs.eq(2).attr('href'), '/bar')
def test_truncate(self):
s = render("""{{ breadcrumbs([('/foo', 'abcd efghij'),],
crumb_size=5) }}'""",
{'request': self.req_app})
doc = PyQuery(s)
crumbs = doc('li>a')
eq_('abcd ...', crumbs.eq(1).text())
def test_xss(self):
s = render("{{ breadcrumbs([('/foo', '<script>')]) }}",
{'request': self.req_app})
        assert '&lt;script&gt;' in s, s
assert '<script>' not in s
@patch('amo.helpers.urlresolvers.reverse')
def test_url(mock_reverse):
render('{{ url("viewname", 1, z=2) }}')
mock_reverse.assert_called_with('viewname', args=(1,), kwargs={'z': 2},
add_prefix=True)
render('{{ url("viewname", 1, z=2, host="myhost") }}')
mock_reverse.assert_called_with('viewname', args=(1,), kwargs={'z': 2},
add_prefix=True)
def test_url_src():
s = render('{{ url("addons.detail", "a3615", src="xxx") }}')
assert s.endswith('?src=xxx')
def test_urlparams():
url = '/en-US/firefox/themes/category'
c = {'base': url,
'base_frag': url + '#hash',
'base_query': url + '?x=y',
'sort': 'name', 'frag': 'frag'}
# Adding a query.
s = render('{{ base_frag|urlparams(sort=sort) }}', c)
eq_(s, '%s?sort=name#hash' % url)
# Adding a fragment.
    s = render('{{ base|urlparams(frag) }}', c)
eq_(s, '%s#frag' % url)
# Replacing a fragment.
s = render('{{ base_frag|urlparams(frag) }}', c)
eq_(s, '%s#frag' % url)
# Adding query and fragment.
s = render('{{ base_frag|urlparams(frag, sort=sort) }}', c)
eq_(s, '%s?sort=name#frag' % url)
# Adding query with existing params.
s = render('{{ base_query|urlparams(frag, sort=sort) }}', c)
    eq_(s, '%s?sort=name&x=y#frag' % url)
# Replacing a query param.
s = render('{{ base_query|urlparams(frag, x="z") }}', c)
eq_(s, '%s?x=z#frag' % url)
# Params with value of None get dropped.
s = render('{{ base|urlparams(sort=None) }}', c)
eq_(s, url)
# Removing a query
s = render('{{ base_query|urlparams(x=None) }}', c)
eq_(s, url)
def test_urlparams_unicode():
url = u'/xx?evil=reco\ufffd\ufffd\ufffd\u02f5'
utils.urlparams(url)
class TestSharedURL(amo.tests.TestCase):
def setUp(self):
self.addon = Mock()
self.addon.type = amo.ADDON_EXTENSION
self.addon.slug = 'addon'
def test_addonurl(self):
expected = '/en-US/firefox/addon/addon/'
eq_(helpers.shared_url('addons.detail', self.addon), expected)
eq_(helpers.shared_url('apps.detail', self.addon), expected)
eq_(helpers.shared_url('detail', self.addon), expected)
eq_(helpers.shared_url('detail', self.addon, add_prefix=False),
'/addon/addon/')
eq_(helpers.shared_url('reviews.detail', self.addon, 1,
add_prefix=False),
'/addon/addon/reviews/1/')
def test_isotime():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|isotime }}', {'d': time})
eq_(s, '2009-12-25T18:11:12Z')
s = render('{{ d|isotime }}', {'d': None})
eq_(s, '')
def test_epoch():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|epoch }}', {'d': time})
eq_(s, '1261764672')
s = render('{{ d|epoch }}', {'d': None})
eq_(s, '')
def test_locale_url():
rf = test_utils.RequestFactory()
request = rf.get('/de', SCRIPT_NAME='/z')
prefixer = urlresolvers.Prefixer(request)
urlresolvers.set_url_prefix(prefixer)
s = render('{{ locale_url("mobile") }}')
eq_(s, '/z/de/mobile')
def test_external_url():
redirect_url = settings.REDIRECT_URL
secretkey = settings.REDIRECT_SECRET_KEY
settings.REDIRECT_URL = 'http://example.net'
settings.REDIRECT_SECRET_KEY = 'sekrit'
try:
myurl = 'http://example.com'
s = render('{{ "%s"|external_url }}' % myurl)
eq_(s, urlresolvers.get_outgoing_url(myurl))
finally:
settings.REDIRECT_URL = redirect_url
        settings.REDIRECT_SECRET_KEY = secretkey
|
GarlandDA/bad-boids
|
bad_boids/test/test_boids.py
|
Python
|
mit
| 5,230
| 0.039962
|
import yaml
import os
from ..boids import Boids
from nose.tools import assert_equal
import random
import numpy as np
from unittest.mock import patch
import unittest.mock as mock
def test_Boids():
flock = Boids(boid_number=10,move_to_middle_strength=0.1,alert_distance=100,formation_flying_distance=900,formation_flying_strength=0.5,
x_position_min=0,x_position_max=200,y_position_min=-5,y_position_max=5,
x_velocity_min=-10,x_velocity_max=30,y_velocity_min=-20,y_velocity_max=20)
# make sure the class is initialised correctly:
assert_equal(flock.boid_number,10)
assert_equal(flock.move_to_middle_strength,0.1)
assert_equal(flock.all_the_boids,range(10))
def test_fly_to_middle():
flock = Boids(boid_number=2,move_to_middle_strength=0.1,alert_distance=100,formation_flying_distance=900,formation_flying_strength=0.5,
x_position_min=0,x_position_max=200,y_position_min=-5,y_position_max=5,
x_velocity_min=-10,x_velocity_max=30,y_velocity_min=-20,y_velocity_max=20)
# make sure self.all_the_boids corresponds to the right thing, i.e. range(self.boid_number)
np.testing.assert_array_equal(range(2),flock.all_the_boids)
assert_equal(flock.move_to_middle_strength,0.1)
# make sure arrays are updated to what we expect them to - #1
flock.x_velocities = [1, 2]
flock.x_positions = [2, 1]
flock.fly_to_middle()
np.testing.assert_array_almost_equal(flock.x_velocities,[0.95, 2.05])
# make sure arrays are updated to what we expect them to - #2
flock.x_velocities = [5, 2]
flock.x_positions = [2, 46]
flock.fly_to_middle()
np.testing.assert_array_almost_equal(flock.x_velocities,[7.2, -0.2])
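    # Editor's note on the expected values above (worked arithmetic, assuming
    # the usual fly-to-middle rule v += (mean(x) - x) * strength):
    #   mean position = (2 + 46) / 2 = 24
    #   boid 0: 5 + (24 - 2)  * 0.1 =  7.2
    #   boid 1: 2 + (24 - 46) * 0.1 = -0.2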
def test_fly_away():
flock = Boids(boid_number=2,move_to_middle_strength=0.1,alert_distance=100,formation_flying_distance=900,formation_flying_strength=0.5,
x_position_min=0,x_position_max=200,y_position_min=-5,y_position_max=5,
x_velocity_min=-10,x_velocity_max=30,y_velocity_min=-20,y_velocity_max=20)
# make sure self.all_the_boids corresponds to the right thing, i.e. range(self.boid_number)
np.testing.assert_array_equal(range(2),flock.all_the_boids)
assert_equal(flock.alert_distance,100)
# make sure arrays are updated to what we expect them to - #1
flock.x_velocities = [1, 2]
flock.x_positions = [2, 1]
flock.fly_away()
np.testing.assert_array_almost_equal(flock.x_velocities,[2, 1])
# make sure arrays are updated to what we expect them to - #2
flock.x_velocities = [5, 2]
flock.x_positions = [2, 46]
flock.fly_away()
np.testing.assert_array_almost_equal(flock.x_velocities,[5, 2])
def test_match_speed():
flock = Boids(boid_number=2,move_to_middle_strength=0.1,alert_distance=100,formation_flying_distance=900,formation_flying_strength=0.5,
x_position_min=0,x_position_max=200,y_position_min=-5,y_position_max=5,
x_velocity_min=-10,x_velocity_max=30,y_velocity_min=-20,y_velocity_max=20)
# make sure self.all_the_boids corresponds to the right thing, i.e. range(self.boid_number)
np.testing.assert_array_equal(range(2),flock.all_the_boids)
assert_equal(flock.formation_flying_distance,900)
assert_equal(flock.formation_flying_strength,0.5)
# make sure arrays are updated to what we expect them to - #1
flock.y_velocities = [1, 2]
flock.match_speed()
np.testing.assert_array_almost_equal(flock.y_velocities,[1., 2.] )
# make sure arrays are updated to what we expect them to - #2
flock.y_velocities = [14, 15]
flock.match_speed()
np.testing.assert_array_almost_equal(flock.y_velocities,[14., 15.])
def test_update_boids():
flock = Boids(boid_number=2,move_to_middle_strength=0.1,alert_distance=100,formation_flying_distance=900,formation_flying_strength=0.5,
x_position_min=0,x_position_max=200,y_position_min=-5,y_position_max=5,
x_velocity_min=-10,x_velocity_max=30,y_velocity_min=-20,y_velocity_max=20)
# test that update_boids() is called all right
with mock.patch.object(flock,'update_boids') as mock_update:
updated = flock.update_boids('')
mock_update.assert_called_with('')
# test that fly_to_middle() works
with mock.patch.object(flock,'fly_to_middle') as mock_middle:
flown_to_middle = flock.fly_to_middle('')
mock_middle.assert_called_with('')
# test that fly_away() works
with mock.patch.object(flock,'fly_away') as mock_away:
flown_away = flock.fly_away('')
mock_away.assert_called_with('')
# test that match_speed() works
    with mock.patch.object(flock,'match_speed') as mock_match:
matched = flock.match_speed('')
mock_match.assert_called_with('')
# test that move() works
with mock.patch.object(flock,'move') as mock_move:
moved = flock.move('')
mock_move.assert_called_with('')
def test_animate():
flock = Boids(boid_number=2,move_to_middle_strength=0.1,alert_distance=100,formation_flying_distance=900,formation_flying_strength=0.5,
                  x_position_min=0,x_position_max=200,y_position_min=-5,y_position_max=5,
x_velocity_min=-10,x_velocity_max=30,y_velocity_min=-20,y_velocity_max=20)
# test that animate() is called correctly
with mock.patch.object(flock,'animate') as mock_animate:
animated = flock.animate('frame')
mock_animate.assert_called_with('frame')
|
owalch/oliver
|
linux/config/scripts/ipblock.py
|
Python
|
bsd-2-clause
| 870
| 0.006897
|
#!/usr/bin/env python
import sys
def ip2str(ip):
    l = [
        (ip >> (3*8)) & 0xFF,
(ip >> (2*8)) & 0xFF,
(ip >> (1*8)) & 0xFF,
(ip >> (0*8)) & 0xFF,
]
return '.'.join([str(i) for i in l])
def str2ip(line):
a, b, c, d = [int(s) for s in line.split('.')]
ip = 0
ip += (a << (3*8))
ip += (b << (2*8))
ip += (c << (1*8))
ip += (d << (0*8))
return ip
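# Worked example (editor's addition): str2ip('10.0.0.1')
#   = (10 << 24) + (0 << 16) + (0 << 8) + 1 = 167772161,
# and ip2str(167772161) recovers '10.0.0.1'.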
blockip = str2ip(sys.stdin.readline())
hostmask = 1
bitcount = 1
for line in sys.stdin.readlines():
try:
ip = str2ip(line.strip())
    except:
print 'Ignored line:', line,
continue
while (blockip & (~hostmask)) != (ip & (~hostmask)):
hostmask = (hostmask << 1) | 1
bitcount += 1
print ip2str(blockip & (~hostmask)) + '/' + str(bitcount), 'hostmask =', ip2str(hostmask)
print 'wrong way around'
|
washbz250/LearnPythonTheHardWay
|
Python3/Tkinter/main.py
|
Python
|
unlicense
| 173
| 0.011561
|
# Import the tkinter library
from tkinter import *
# Making a window object
root = Tk()
# Keep the window onscreen and processing events until it is closed.
root.mainloop()
|
erramuzpe/seattle-perceptual-learning
|
perclearn/scripts/fashion_mnist_cnn.py
|
Python
|
apache-2.0
| 2,248
| 0
|
'''Trains a simple convnet on the Fashion MNIST dataset.
Gets to % test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
'''
from __future__ import print_function
import keras
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
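# Editor's sketch (not in the original script): after training, single-image
# inference reuses the same input shaping plus a leading batch axis.
import numpy as np
probs = model.predict(x_test[:1])        # shape (1, num_classes)
predicted_class = int(np.argmax(probs))
print('Predicted class for first test image:', predicted_class)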
|
rnirmal/savanna
|
savanna/tests/integration/test_config/test_cluster_config.py
|
Python
|
apache-2.0
| 6,861
| 0
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import telnetlib
from savanna.tests.integration import base
import savanna.tests.integration.configs.parameters as param
def _add_config(body, config):
if config in [param.NAMENODE_CONFIG, param.DATANODE_CONFIG]:
body['node_configs']['HDFS'] = config
elif config == param.GENERAL_CONFIG:
body['cluster_configs']['general'] = config
elif config == param.CLUSTER_HDFS_CONFIG:
body['cluster_configs']['HDFS'] = config
elif config == param.CLUSTER_MAPREDUCE_CONFIG:
body['cluster_configs']['MapReduce'] = config
else:
body['node_configs']['MapReduce'] = config
class ClusterConfigTest(base.ITestCase):
def setUp(self):
super(ClusterConfigTest, self).setUp()
telnetlib.Telnet(self.host, self.port)
def assertConfigs(self, get_config, param_config):
self.assertEqual(get_config, param_config,
msg='configs are not equal: %s != %s'
% (str(get_config), str(param_config)))
def assertConfigOnNode(self, host, config, value):
conf = config.replace(' ', '')
com = self.execute_command(host, './script.sh %s -val %s -url %s' %
(conf, value, param.OS_AUTH_URL))
self.assertEqual(com[0], 0,
msg='host: %s, config %s is not equal: %s'
% (host, config, value))
def _cluster_config_testing(self, cluster_body):
cluster_id = None
try:
_add_config(cluster_body, param.GENERAL_CONFIG)
_add_config(cluster_body, param.CLUSTER_HDFS_CONFIG)
_add_config(cluster_body, param.CLUSTER_MAPREDUCE_CONFIG)
cluster_id = self.create_cluster_and_get_id(cluster_body)
get_data = self.get_object(self.url_cluster_with_slash,
cluster_id, 200, True)
get_data = get_data['cluster']
self.assertConfigs(get_data['cluster_configs']['general'],
param.GENERAL_CONFIG)
self.assertConfigs(get_data['cluster_configs']['HDFS'],
param.CLUSTER_HDFS_CONFIG)
self.assertConfigs(get_data['cluster_configs']['MapReduce'],
param.CLUSTER_MAPREDUCE_CONFIG)
node_groups = get_data['node_groups']
ip_instances = {}
process_map = {
'namenode': {
'service': 'HDFS', 'param': param.NAMENODE_CONFIG},
'jobtracker': {
'service': 'MapReduce', 'param': param.JOBTRACKER_CONFIG},
'datanode': {
'service': 'HDFS', 'param': param.DATANODE_CONFIG},
'tasktracker': {
'service': 'MapReduce', 'param': param.TASKTRACKER_CONFIG}
}
def get_node_configs(node_group, process):
return \
node_group['node_configs'][process_map[process]['service']]
def get_param(process):
return process_map[process]['param']
for node_group in node_groups:
for process in node_group['node_processes']:
self.assertConfigs(
get_node_configs(node_group,
process), get_param(process))
instances = node_group['instances']
for instans in instances:
management_ip = instans['management_ip']
self.transfer_script_to_node(
management_ip, 'test_config/config_test_script.sh')
ip_instances[management_ip] = node_group[
'node_processes']
try:
for key, processes in ip_instances.items():
telnetlib.Telnet(key, '22')
for conf, value in param.CLUSTER_MAPREDUCE_CONFIG.items():
self.assertConfigOnNode(key, conf, value)
for conf, value in param.CLUSTER_HDFS_CONFIG.items():
self.assertConfigOnNode(key, conf, value)
for process in processes:
for sec_key, sec_value in get_param(process).items():
self.assertConfigOnNode(key, sec_key, sec_value)
if 'namenode' in processes:
for sec_key, sec_value in param.GENERAL_CONFIG.items():
self.assertConfigOnNode(
key, sec_key, sec_value)
except Exception as e:
self.fail(e.message)
except Exception as e:
self.fail(e.message)
finally:
self.del_object(self.url_cluster_with_slash, cluster_id, 204)
def test_cluster_config_nnjt_ttdn(self):
id_master_ngt = None
id_worker_ngt = None
try:
master_ngt_body = self.make_node_group_template(
'master-ngt', 'qa probe', 'JT+NN')
_add_config(master_ngt_body, param.NAMENODE_CONFIG)
_add_config(master_ngt_body, param.JOBTRACKER_CONFIG)
            id_master_ngt = self.get_object_id(
                'node_group_template', self.post_object(self.url_ngt,
                                                        master_ngt_body, 202))
worker_ngt_body = self.make_node_group_template(
'worker-ngt', 'qa probe', 'TT+DN')
_add_config(worker_ngt_body, param.DATANODE_CONFIG)
_add_config(worker_ngt_body, param.TASKTRACKER_CONFIG)
id_worker_ngt = self.get_object_id(
'node_group_template', self.post_object(self.url_ngt,
worker_ngt_body, 202))
ngt_id_list = {id_master_ngt: 1, id_worker_ngt: 2}
cl_body = self.make_cl_body_node_group_templates(ngt_id_list)
self._cluster_config_testing(cl_body)
except Exception as e:
self.fail(str(e))
finally:
self.del_object(self.url_ngt_with_slash, id_master_ngt, 204)
self.del_object(self.url_ngt_with_slash, id_worker_ngt, 204)
|
BitWriters/Zenith_project
|
zango/lib/python3.5/site-packages/django/contrib/contenttypes/migrations/0002_remove_content_type_name.py
|
Python
|
mit
| 1,168
| 0.000856
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def add_legacy_name(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
    for ct in ContentType.objects.all():
try:
ct.name = apps.get_model(ct.app_label, ct.model)._meta.object_name
except LookupError:
ct.name = ct.model
ct.save()
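# Editor's note (added for clarity): below, RunPython(noop, add_legacy_name)
# makes the forward step a no-op and runs add_legacy_name only when the
# migration is reversed, repopulating the column before RemoveField is undone.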
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='contenttype',
options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
),
migrations.AlterField(
model_name='contenttype',
name='name',
field=models.CharField(max_length=100, null=True),
),
migrations.RunPython(
migrations.RunPython.noop,
add_legacy_name,
hints={'model_name': 'contenttype'},
),
migrations.RemoveField(
model_name='contenttype',
name='name',
),
]
|
johnmgregoire/JCAPdatavis
|
echem_paperplots.py
|
Python
|
bsd-3-clause
| 3,052
| 0.020315
|
import time, copy
import os, os.path
import sys
import numpy
import pylab  # used for plotting below; may also arrive via the star imports
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from scipy import optimize
from echem_plate_ui import *
from echem_plate_math import *
import pickle
p1='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/echemplots/2012-9_FeCoNiTi_500C_fastCPCV_plate1_dlist_1066.dat'
p2='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/echemplots/2012-9_FeCoNiTi_500C_fastCPCV_plate1_dlist_1662.dat'
pill='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/echemplots/2012-9FeCoNiTi_500C_CAill_plate1_dlist_1164.dat'
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/echemplots')
vshift=-.24
imult=1.e6
cai0, cai1=(0, 6500)
f=open(p1, mode='r')
d1=pickle.load(f)
f.close()
f=open(p2, mode='r')
d2=pickle.load(f)
f.close()
f=open(pill, mode='r')
dill=pickle.load(f)
f.close()
segd1up, segd1dn=d1['segprops_dlist']
i1up=d1['I(A)'][segd1up['inds']][4:]
lin1up=i1up-d1['I(A)_LinSub'][segd1up['inds']][4:]
v1up=d1['Ewe(V)'][segd1up['inds']][4:]+vshift
i1dn=d1['I(A)'][segd1dn['inds']]
v1dn=d1['Ewe(V)'][segd1dn['inds']]+vshift
i1up*=imult
i1dn*=imult
lin1up*=imult
segd2up, segd2dn=d2['segprops_dlist']
i2up=d2['I(A)'][segd2up['inds']][4:]
lin2up=i2up-d2['I(A)_LinSub'][segd2up['inds']][4:]
v2up=d2['Ewe(V)'][segd2up['inds']][4:]+vshift
i2dn=d2['I(A)'][segd2dn['inds']]
v2dn=d2['Ewe(V)'][segd2dn['inds']]+vshift
i2up*=imult
i2dn*=imult
lin2up*=imult
ica=dill['I(A)_SG'][cai0:cai1]*imult
icadiff=dill['Idiff_time'][cai0:cai1]*imult
tca=dill['t(s)'][cai0:cai1]
tca_cycs=dill['till_cycs']
cycinds=numpy.where((tca_cycs>=tca.min())&(tca_cycs<=tca.max()))[0]
tca_cycs=tca_cycs[cycinds]
iphoto_cycs=dill['Photocurrent_cycs(A)'][cycinds]*imult
pylab.rc('font', family='serif', serif='Times New Roman', size=11)
fig=pylab.figure(figsize=(3.5, 4.5))
#ax1=pylab.subplot(211)
#ax2=pylab.subplot(212)
ax1=fig.add_axes((.2, .6, .74, .35))
ax2=fig.add_axes((.2, .11, .6, .35))
ax3=ax2.twinx()
ax1.plot(v1up, i1up, 'g-', linewidth=1.)
ax1.plot(v1up, lin1up, 'g:', linewidth=1.)
ax1.plot(v1dn, i1dn, 'g--', linewidth=1.)
ax1.plot(v2up, i2up, 'b-', linewidth=1.)
ax1.plot(v2up, lin2up, 'b:', linewidth=1.)
ax1.plot(v2dn, i2dn, 'b--', linewidth=1.)
ax1.set_xlim((-.1, .62))
ax1.set_ylim((-40, 130))
ax1.set_xlabel('Potential (V vs H$_2$O/O$_2$)', fontsize=12)
ax1.set_ylabel('Current ($\mu$A)', fontsize=12)
ax2.plot(tca, ica, 'k-')
ax2.plot(tca, icadiff, 'b--', linewidth=2)
ax2.set_xlim((0, 6.5))
ax2.set_ylim((0, 0.4))
ax3.plot(tca_cycs, iphoto_cycs, 'ro-')
ax3.set_ylim((0, 0.1))
ax2.set_xlabel('Elapsed time (s)', fontsize=12)
ax2.set_ylabel('Current ($\mu$A)', fontsize=12)
ax3.set_ylabel('Photocurrent ($\mu$A)', fontsize=12)
pylab.show()
print ''.join(['%s%.3f' %tup for tup in zip(dill['elements'], dill['compositions'])])
print ''.join(['%s%.3f' %tup for tup in zip(d1['elements'], d1['compositions'])])
print ''.join(['%s%.3f' %tup for tup in zip(d2['elements'], d2['compositions'])])
|
geraldarthur/PrismJr
|
twitter.py
|
Python
|
mit
| 5,815
| 0.003955
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from csv import DictReader, DictWriter
from datetime import datetime
from os import environ
from sys import argv
from time import sleep
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs
# (command, argument, input, output) = argv[1:]
# Twitter specific auth urls
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize?oauth_token="
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
# secretz
CONSUMER_KEY = environ.get('TW_CONSUMER_KEY')
CONSUMER_SECRET = environ.get('TW_CONSUMER_SECRET')
# written at runtime
OAUTH_TOKEN = ""
OAUTH_TOKEN_SECRET = ""
# API urls & params
BASE_URL = "https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name="
COUNT = "&count=200"
CUTOFF = datetime(2013, 6, 21)
# get variables from the command line
# (argument, input, output) = argv[1:]
def setup_oauth():
"""Authorize your app
|
via identifier."""
# Request token
oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)
r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
credentials = parse_qs(r.content)
resource_owner_key = credentials.get('oauth_token')[0]
resource_owner_secret = credentials.get('oauth_token_secret')[0]
# Authorize
authorize_url = AUTHORIZE_URL + resource_owner_key
print 'Please go here and authorize: ' + authorize_url
    verifier = raw_input('Please input the verifier: ')
oauth = OAuth1(CONSUMER_KEY,
client_secret=CONSUMER_SECRET,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
# Finally, Obtain the Access Token
r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
credentials = parse_qs(r.content)
token = credentials.get('oauth_token')[0]
secret = credentials.get('oauth_token_secret')[0]
return token, secret
# get the oauth
def get_oauth():
oauth = OAuth1(CONSUMER_KEY,
client_secret=CONSUMER_SECRET,
resource_owner_key=OAUTH_TOKEN,
resource_owner_secret=OAUTH_TOKEN_SECRET)
return oauth
def input_users(filename):
"""Reads a CSV and returns a list of majority and minority users"""
with open(filename, 'rU') as readf:
# create a list of dicts of Twitter URLs nested objects
data = list(DictReader(readf))
# create a list of majority Twitter accounts strings
tweeps = {}
missing = [obj['handle'] for obj in data if obj['handle']]
#create dict of arrays of Twitter screen names
tweeps.update({
'missing': missing
})
return tweeps
def parse_tweets(lists):
"""
Takes a list of dictionaries, get the good bits
Returns a dictionary
"""
timeline = []
for statuses in lists:
for status in statuses:
format = "%a %b %d %H:%M:%S +0000 %Y"
output_format = "%m/%d/%Y %H:%M:%S"
stamp = datetime.strptime(status['created_at'], format)
if stamp >= CUTOFF:
# write the data to a new dictionary
timeline.append({
# "id": "%s" % status['id_str'],
"screen_name": "%s" % status['user']['screen_name'],
"text": "%s" % status['text'],
"retweet_count": "%s" % status['retweet_count'],
"favorite_count": "%s" % status['favorite_count'],
"created_at": "%s" % stamp.strftime(output_format),
"week": "%s" % stamp.strftime("%U"),
"id": "%s" % status['id_str']
})
return timeline
def next_timeline(statuses, tweep, oauth):
"""
Parse out the max_id and since_id, then keep on rolling.
Takes a nested objects
"""
max_id = statuses[-1]['id'] - 1
max_param = "&max_id=%s" % max_id
url = BASE_URL + tweep + COUNT + max_param
next_r = requests.get(url=url, auth=oauth)
return next_r.json()
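# Editor's note: max_id is set to (oldest id seen - 1) because Twitter's
# timeline endpoint treats max_id as inclusive; subtracting one keeps the
# next page from repeating the last tweet of the previous one.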
def make_requests(users, oauth):
"""
Construct a request from a list of users
Return a request object
"""
storage = []
for user in users:
print user
r = requests.get(url=BASE_URL+user+COUNT, auth=oauth)
statuses = r.json()
next = next_timeline(statuses, user, oauth)
print 'initial %s' %len(statuses)
while len(next) > 0:
statuses.extend(next)
print 'added %s' % len(next)
sleep(10)
next = next_timeline(statuses, user, oauth)
storage.append(statuses)
print 'appended %s total to storage' % len(statuses)
return storage
def output_csv(timeline, user=None):
"""
Takes a dictionary and writes it to a CSV
"""
row0 = timeline[0].keys()
if user != None:
write_name = user + '.csv'
else:
write_name = 'missing_tweeps.csv'
with open(write_name, 'wb') as writef:
# Write the header row because of reasons.
write_csv = DictWriter(writef, fieldnames=row0)
write_csv.writeheader()
# write the dictionary to a CSV, and encode strings at UTF8
for d in timeline:
write_csv.writerow({k:v.encode('utf8') for k,v in d.items()})
print 'PRISM Jr. is done. Please see %s' % write_name
if __name__ == "__main__":
if not OAUTH_TOKEN:
token, secret = setup_oauth()
print "OAUTH_TOKEN: " + token
OAUTH_TOKEN = token
print "OAUTH_TOKEN_SECRET: " + secret
OAUTH_TOKEN_SECRET = secret
print
else:
oauth = get_oauth()
tweeps = input_users('missing.csv')
storage = make_requests(tweeps['missing'], oauth)
timeline = parse_tweets(storage)
output_csv(timeline)
|
haginara/csvkit
|
csvkit/utilities/csvlook.py
|
Python
|
mit
| 2,305
| 0.004338
|
#!/usr/bin/env python
import itertools
import six
from csvkit import CSVKitReader
from csvkit.cli import CSVKitUtility
from csvkit.headers import make_default_headers
class CSVLook(CSVKitUtility):
description = 'Render a CSV file in the console as a fixed-width table.'
    def add_arguments(self):
        pass

    def main(self):
rows = CSVKitReader(self.input_file, **self.reader_kwargs)
# Make a default header row if none exists
if self.args.no_header_row:
row = next(rows)
column_names = make_default_headers(len(row))
            # Put the row back on top
            rows = itertools.chain([row], rows)
else:
column_names = next(rows)
column_names = list(column_names)
# prepend 'line_number' column with line numbers if --linenumbers option
if self.args.line_numbers:
column_names.insert(0, 'line_number')
rows = [list(itertools.chain([str(i + 1)], row)) for i, row in enumerate(rows)]
# Convert to normal list of rows
rows = list(rows)
# Insert the column names at the top
rows.insert(0, column_names)
widths = []
for row in rows:
for i, v in enumerate(row):
try:
if len(v) > widths[i]:
widths[i] = len(v)
except IndexError:
widths.append(len(v))
# Dashes span each width with '+' character at intersection of
# horizontal and vertical dividers.
divider = '|--' + '-+-'.join('-'* w for w in widths) + '--|'
self.output_file.write('%s\n' % divider)
for i, row in enumerate(rows):
output = []
for j, d in enumerate(row):
if d is None:
d = ''
output.append(' %s ' % six.text_type(d).ljust(widths[j]))
self.output_file.write('| %s |\n' % ('|'.join(output)))
if (i == 0 or i == len(rows) - 1):
self.output_file.write('%s\n' % divider)
def launch_new_instance():
utility = CSVLook()
utility.main()
if __name__ == "__main__":
launch_new_instance()
|
veskopos/VMWare
|
api/requests_api.py
|
Python
|
gpl-2.0
| 1,010
| 0.036634
|
import requests
import yaml
class RequestsApi:
def __init__(self):
'init'
self.config = yaml.load(open("config/request_settings.yml", "r"))
def get_objects(self, sector):
'request to get objects'
objects_points = []
url = self.config['host'] + self.config['object_path'] % sector
response = requests.get(url)
if not response.status_code == 200 : return []
for line in response.text.splitlines():
objects_points.append([int(num) for num in line.split(' ')])
return objects_points
def get_roots(self, sector):
'request to get roots'
roots = []
        url = self.config['host'] + self.config['root_path'] % sector
response = requests.get(url)
        if not response.status_code == 200 : return []
for line in response.text.splitlines():
roots.append(int(line))
return roots
def send_trajectory(self, sector, paths):
'requets to send trajectory'
url = self.config['host'] + self.config['trajectory_path'] % sector
requests.post(url, params = {'trajectory' : paths})
|
telminov/sw-django-utils
|
djutils/forms.py
|
Python
|
mit
| 604
| 0
|
# coding: utf-8
def transform_form_error(form, verbose=True):
"""
transform form errors to list like
["field1: error1", "field2: error2"]
"""
errors = []
for field, err_msg in form.errors.items():
if field == '__all__': # general errors
errors.append(', '.join(err_msg))
else: # field errors
field_name = field
if verbose and field in form.fields:
                field_name = form.fields[field].label or field
errors.append('%s: %s' % (field_name, ', '.join(err_msg)))
return errors
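# Editor's usage sketch (hypothetical MyForm and view; not part of this module):
#     form = MyForm(data=request.POST)
#     if not form.is_valid():
#         return JsonResponse({'errors': transform_form_error(form)})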
|
3dfxsoftware/cbss-addons
|
account_voucher_move_id/__openerp__.py
|
Python
|
gpl-2.0
| 1,448
| 0.002762
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Show Journal Entry in voucher',
'version': '0.1',
'author': 'Vauxoo',
'category': 'Accounting',
'website': 'http://www.vauxoo.com',
'description': """
This module show Journal Entry in:
========================================
*Sales Receipts
*Customer Payments
*Purchase Receipts
*Supplier Payments
""",
    'depends' : ['account_voucher'],
'data': [
"account_voucher.xml",
],
'auto_install': False,
'installable': True,
}
|
|
woolfson-group/isambard
|
unit_tests/random_isambard_objects.py
|
Python
|
mit
| 2,186
| 0.005489
|
import random
import numpy
import isambard
def random_angles(n=1, min_val=0, max_val=180, radians=False):
angles = [(random.random() * random.choice(range(abs(max_val - min_val)))) + min_val for _ in range(n)]
if radians:
angles = [numpy.rad2deg(x) for x in angles]
return angles
def random_vectors(n=1, min_val=-100, max_val=100, vector_length=3):
return [[(random.random() * random.choice(range(abs(max_val - min_val)))) + min_val
for _ in range(vector_length)] for _ in range(n)]
def random_integer_vectors(n=1, min_val=-100, max_val=100, vector_length=3):
return [[random.choice(range(min_val, max_val)) for _ in range(vector_length)] for _ in range(n)]
def random_floats(n=1, min_val=-100, max_val=100):
return [(random.random() * random.choice(range(abs(max_val - min_val)))) + min_val for _ in range(n)]
def random_helical_helices(n=1, min_residues=10, max_residues=20):
# build random HelicalHelix objects
major_radii = random_floats(n=n, min_val=1, max_val=100)
major_pitches = random_floats(n=n, min_val=100, max_val=1000)
aas = [random.choice(range(min_residues, max_residues)) for _ in range(n)]
    phi_c_alphas = random_angles(n=n, min_val=-179, max_val=180)
orientations = [random.choice([-1, 1]) for _ in range(n)]
# minor_repeat can't be set to zero - must be set to None.
minor_repeats = random_floats(n=n, min_val=0, max_val=100)
    zero_indices = [i for i, x in enumerate(minor_repeats) if x == 0.0]
for i in zero_indices:
minor_repeats[i] = None
minor_helix_types = [random.choice(['alpha', 'pi', 'PPII', 'collagen']) for _ in range(n)]
major_handedness = [random.choice(['l', 'r']) for _ in range(n)]
hhs = [isambard.ampal.specifications.polymer_specs.helix.HelicalHelix(aa=aas[i], major_pitch=major_pitches[i],
major_radius=major_radii[i], major_handedness=major_handedness[i],
minor_helix_type=minor_helix_types[i], orientation=orientations[i],
phi_c_alpha=phi_c_alphas[i], minor_repeat=minor_repeats[i])
for i in range(n)]
return hhs
__author__ = 'Jack W. Heal'
|
snakeleon/YouCompleteMe-x64
|
python/ycm/youcompleteme.py
|
Python
|
gpl-3.0
| 31,916
| 0.027259
|
# Copyright (C) 2011-2018 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import base64
import json
import logging
import os
import signal
import vim
from subprocess import PIPE
from tempfile import NamedTemporaryFile
from ycm import base, paths, signature_help, vimsupport
from ycm.buffer import BufferDict
from ycmd import utils
from ycmd.request_wrap import RequestWrap
from ycm.omni_completer import OmniCompleter
from ycm import syntax_parse
from ycm.client.ycmd_keepalive import YcmdKeepalive
from ycm.client.base_request import BaseRequest, BuildRequestData
from ycm.client.completer_available_request import SendCompleterAvailableRequest
from ycm.client.command_request import ( SendCommandRequest,
SendCommandRequestAsync,
GetCommandResponse )
from ycm.client.completion_request import CompletionRequest
from ycm.client.resolve_completion_request import ResolveCompletionItem
from ycm.client.signature_help_request import ( SignatureHelpRequest,
SigHelpAvailableByFileType )
from ycm.client.debug_info_request import ( SendDebugInfoRequest,
FormatDebugInfoResponse )
from ycm.client.omni_completion_request import OmniCompletionRequest
from ycm.client.event_notification import SendEventNotificationAsync
from ycm.client.shutdown_request import SendShutdownRequest
from ycm.client.messages_request import MessagesPoll
def PatchNoProxy():
current_value = os.environ.get( 'no_proxy', '' )
additions = '127.0.0.1,localhost'
os.environ[ 'no_proxy' ] = ( additions if not current_value
else current_value + ',' + additions )
# We need this so that Requests doesn't end up using the local HTTP proxy when
# talking to ycmd. Users should actually be setting this themselves when
# configuring a proxy server on their machine, but most don't know they need to
# or how to do it, so we do it for them.
# Relevant issues:
# https://github.com/Valloric/YouCompleteMe/issues/641
# https://github.com/kennethreitz/requests/issues/879
PatchNoProxy()
# Force the Python interpreter embedded in Vim (in which we are running) to
# ignore the SIGINT signal. This helps reduce the fallout of a user pressing
# Ctrl-C in Vim.
signal.signal( signal.SIGINT, signal.SIG_IGN )
HMAC_SECRET_LENGTH = 16
SERVER_SHUTDOWN_MESSAGE = (
"The ycmd server SHUT DOWN (restart with ':YcmRestartServer')." )
EXIT_CODE_UNEXPECTED_MESSAGE = (
"Unexpected exit code {code}. "
"Type ':YcmToggleLogs {logfile}' to check the logs." )
CORE_UNEXPECTED_MESSAGE = (
"Unexpected error while loading the YCM core library. "
"Type ':YcmToggleLogs {logfile}' to check the logs." )
CORE_MISSING_MESSAGE = (
'YCM core library not detected; you need to compile YCM before using it. '
'Follow the instructions in the documentation.' )
CORE_OUTDATED_MESSAGE = (
'YCM core library too old; PLEASE RECOMPILE by running the install.py '
'script. See the documentation for more details.' )
NO_PYTHON2_SUPPORT_MESSAGE = (
'YCM has dropped support for python2. '
'You need to recompile it with python3 instead.' )
SERVER_IDLE_SUICIDE_SECONDS = 1800 # 30 minutes
CLIENT_LOGFILE_FORMAT = 'ycm_'
SERVER_LOGFILE_FORMAT = 'ycmd_{port}_{std}_'
# Flag to set a file handle inheritable by child processes on Windows. See
# https://msdn.microsoft.com/en-us/library/ms724935.aspx
HANDLE_FLAG_INHERIT = 0x00000001
class YouCompleteMe:
def __init__( self, default_options = {} ):
self._logger = logging.getLogger( 'ycm' )
self._client_logfile = None
self._server_stdout = None
self._server_stderr = None
self._server_popen = None
self._default_options = default_options
self._ycmd_keepalive = YcmdKeepalive()
self._SetUpLogging()
self._SetUpServer()
self._ycmd_keepalive.Start()
def _SetUpServer( self ):
self._available_completers = {}
self._user_notified_about_crash = False
self._filetypes_with_keywords_loaded = set()
self._server_is_ready_with_cache = False
self._message_poll_requests = {}
self._latest_completion_request = None
self._latest_signature_help_request = None
self._signature_help_available_requests = SigHelpAvailableByFileType()
self._latest_command_reqeust = None
self._signature_help_state = signature_help.SignatureHelpState()
self._user_options = base.GetUserOptions( self._default_options )
self._omnicomp = OmniCompleter( self._user_options )
self._buffers = BufferDict( self._user_options )
self._SetLogLevel()
hmac_secret = os.urandom( HMAC_SECRET_LENGTH )
options_dict = dict( self._user_options )
options_dict[ 'hmac_secret' ] = utils.ToUnicode(
base64.b64encode( hmac_secret ) )
options_dict[ 'server_keep_logfiles' ] = self._user_options[
'keep_logfiles' ]
# The temp options file is deleted by ycmd during startup.
with NamedTemporaryFile( delete = False, mode = 'w+' ) as options_file:
json.dump( options_dict, options_file )
server_port = utils.GetUnusedLocalhostPort()
BaseRequest.server_location = 'http://127.0.0.1:' + str( server_port )
BaseRequest.hmac_secret = hmac_secret
try:
python_interpreter = paths.PathToPythonInterpreter()
except RuntimeError as error:
error_message = (
f"Unable to start the ycmd server. { str( error ).rstrip( '.' ) }. "
"Correct the error then restart the server "
"with ':YcmRestartServer'." )
self._logger.exception( error_message )
vimsupport.PostVimMessage( error_message )
return
args = [ python_interpreter,
paths.PathToServerScript(),
f'--port={ server_port }',
f'--options_file={ options_file.name }',
f'--log={ self._user_options[ "log_level" ] }',
f'--idle_suicide_seconds={ SERVER_IDLE_SUICIDE_SECONDS }' ]
self._server_stdout = utils.CreateLogfile(
SERVER_LOGFILE_FORMAT.format( port = server_port, std = 'stdout' ) )
self._server_stderr = utils.CreateLogfile(
SERVER_LOGFILE_FORMAT.format( port = server_port, std = 'stderr' ) )
args.append( f'--stdout={ self._server_stdout }' )
args.append( f'--stderr={ self._server_stderr }' )
if self._user_options[ 'keep_logfiles' ]:
args.append( '--keep_logfiles' )
self._server_popen = utils.SafePopen( args, stdin_windows = PIPE,
stdout = PIPE, stderr = PIPE )
def _SetUpLogging( self ):
def FreeFileFromOtherProcesses( file_object ):
if utils.OnWindows():
from ctypes import windll
import msvcrt
file_handle = msvcrt.get_osfhandle( file_object.fileno() )
        windll.kernel32.SetHandleInformation( file_handle,
                                              HANDLE_FLAG_INHERIT,
                                              0 )
self._client_logfile = utils.CreateLogfile( CLIENT_LOGFILE_FORMAT )
handler = logging.FileHandler( self._client_logfile )
    # On Windows and Python prior to 3.4, file handles are inherited by child
# processes started with at least one replaced standard stream, which is the
# case when we start the ycmd server (we are redirecting all standard
# outputs into a pipe). These files cannot be removed while the child
# processes are still up. This is not desirable for a logfile because we
    # want to remove it at Vim exit without
|
openstack/doc8
|
src/doc8/__main__.py
|
Python
|
apache-2.0
| 641
| 0
|
# Copyright (C) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "Lice
|
nse"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from doc8 import main
sys.exit(main.main())
|
CamFlawless/python_projects
|
crm/src/podio_2.py
|
Python
|
mit
| 18,252
| 0.01808
|
# Build out a means of storing this data into a DB and preserving it for archival and analysis
client_id = "crmdatagettest"
client_secret = "I88RXVb9y1Am0AXauUSdOr8Pux9zAwNHDRSLX0UKmI6SbtTI0u4inxGa8i6cwVqi"
username = "camjcollins@gmail.com"
password = "Padthai123*"
from pypodio2 import api
import pygsheets
import json
import pandas as pd
from sqlalchemy import create_engine
c = api.OAuthClient(client_id, client_secret, username, password)
def _get_podio_data(my_list, field):
output_list = []
for record in my_list:
for part in record:
if part == field:
output_list.append(record[part])
return(output_list)
apps_dict = {}
# APPEND LIST WITH ADDITIONAL APP NAME AND IDs
my_apps = [ ["sales_leads", 25523237],
["offers", 25523238],
["whiteboard", 25523241],
["appointments", 25523247]
]
for app in my_apps:
apps_dict[app[0]] = c.Application.get_items(app[1])['items']
# print(apps_dict)
# quit()
# CREATING EMPTY DICTS TO BE APPENDED
'''
NOTES: CREATED BY IS A MORE DEEPLY NESTED LIST IN THE OBJECT
WILL NEED TO WRITE ADDITIONAL FOR LOOPS TO GO AND GRAB VALUE
'''
app_item_id_dict = {}
item_created_on_dict = {}
item_created_by_dict = {}
item_last_event_on_dict = {}
item_fields_dict = {}
for key, value in apps_dict.items():
app_item_id_dict[key] = _get_podio_data(value, "app_item_id")
item_created_on_dict[key] = _get_podio_data(value, "created_on")
created_by_list = []
for item in _get_podio_data(value, "created_by"):
created_by_list.append(item['name'])
item_created_by_dict[key] = created_by_list
item_last_event_on_dict[key] = _get_podio_data(value, "last_event_on")
item_fields_dict[key] = _get_podio_data(value, "fields")
# print(item_fields_dict['sales_leads'][0])
# quit()
''' Lets now programatically go through the app item records and get a list of all the fields
We will get the label/name, type, and field_id
We will zip them up and include in a nested dict for us to use further down the script
'''
app_fields = {}
for app in my_apps:
field_labels = []
field_types = []
field_ids = []
for record in item_fields_dict[app[0]]:
for field in record:
field_labels.append(field['label'])
field_types.append(field['type'])
field_ids.append(field['field_id'])
# WE USE THE SET TO REMOVE DUPLICATES ENTRIES; ZIP TO COMBINE THE THREE INTO A TUPLED LIST
app_fields[app[0]] = list(set(zip(field_labels, field_types, field_ids)))
# print(app_fields)
# quit()
dict1 = {}
for field in app_fields['sales_leads']:
dict1[field] = {}
search_str = ["'field_id': " + str(field[2]) + ", 'label'"]
record_values = []
    for record in item_fields_dict['sales_leads']:
        if any(word in str(record) for word in search_str):
# print(record)
for entry in record:
# print(entry)
# print(field[2])
# quit()
if entry['field_id'] == field[2] and entry['type'] == "app":
# print(entry)
# quit()
''' what do we want to do if the field we are on is the field we are searching for?
** we would want to extract the corresponding value from the object '''
if entry['type'] == "date": # what to grab when we see a date
print(entry['values'][0]['start'])
if entry['type'] == "contact":
print(entry['values'][0]['value']['name'])
if entry['type'] == "category":
print(entry['values'][0]['value']['text'])
if entry['type'] == "app":
print(entry['values'][0]['value']['app_item_id'])
quit()
else:
pass
# if entry['type'] == "category":
# print(entry['values'][0]['value']['text'])
# elif entry['type'] == "text":
# print(entry['values'][0]['value'])
# elif entry['type'] == "money":
# print(entry['values'][0]['value'])
# elif entry['type'] == "contact":
# print(entry['values'][0]['name'])
# elif entry['type'] == "app":
# print(entry['values'][0]['app_item_id'])
# quit()
# else:
# pass
# quit()
print(record_values)
quit()
'''
WE HAVE ALL THE APPS --> my_apps[0]
SO NOW WE HAVE ALL RECORDS RETURNED --> item_field_dict[my_apps[0]]
WE HAVE ALL THE AVAILABLE FIELDS --> app_fields[my_apps[0]]
How can we loop through each field for each record in each app?
my_data = {}
for app in my_app:
for record in app:
for field in app_fields[app]:
if str("'field_id': " + str(field[2]) + ", 'label'") in records:
print(record)
my_data[app][field[1]] =
find the field value (if it exist) --> append to list within dict if so, append empty string to dict if not
if field[1] == "date" then:
do this...
if field[1] == "name" then:
do this...
if field[1] == "category" then:
do this...
if field[1] == "text then:
do this...
## DICT OF ALL ITEMS RETURNED BY ALL APPS == apps_dict
print(apps_dict)
print('\n\n')
## LIST OF APPS == my_apps
print(my_apps)
print('\n\n')
## LIST OF RECORDS == item_fields_dict[my_apps[0]] (this is a dict keyed by app_name)
print(item_fields_dict['sales_leads'])
print('\n\n')
## LIST OF FIELDS == app_fields[my_apps[0]] (this is a dict keyed by app_name)
print(app_fields['sales_leads'])
print('\n\n')
'''
# quit()
for app in my_apps:
for item in item_fields_dict[app[0]]:
print(type(item))
print(item)
quit()
for item in item_fields_dict[app[0]][[1][0]]:
print(item)
quit()
# for field in app_fields[app[0]]:
def _podio_get_field_data_simple(list_to_search, field_id):
return_list = []
for record in list_to_search: # record == item in this case (1 sales lead, 1 offer, 1 appt, etc.)
search_str = ["'field_id': " + str(field_id) + ", 'label'"]
if any(word in str(record) for word in search_str): # if the record has data for the specified field..
for field in record: # field == a field (or attribute) of the item
if field['field_id'] == field_id:
try:
return_list.append(field['values'][0]['value'])
except KeyError:
return_list.append(field['values'][0]['start'])
else:
return_list.append("")
return(return_list)
app_data = {}
for app in my_apps: # app name == app[0] [sales_offer, offers, etc.]
print(app[0])
for item in item_fields_dict[app[0]]: # this is the record's fields detail
print(item) #$$ CONFIDENT THIS IS THE RECORD (i.e. RECORD)
quit()
field_values = []
for field in app_fields[app[0]]: # this is the field to query for
# print(field[2]) # CONFIDENT THIS IS THE FIELD_ID INT
# for record in item:
quit()
quit()
# for app in my_apps:
# for field in app_fields[app[0]]:
# print(item_fields_dict['offers'])
# print(field[2])
# print(_podio_get_field_data_simple(item_fields_dict['offers'], field[2]))
# quit()
def _podio_get_field_data_simple(list_to_search, field_id):
return_list = []
for record in list_to_search: # record == item in this case (1 sales lead, 1 offer, 1 appt, etc.)
search_str = ["'field_id': " + str(field_id) + ", 'label'"]
        if any(word in str(record) for word in search_str): # if the record has data for the specified field..
|