Dataset schema: one git commit per row. Columns, dtypes, and observed value ranges (string-length ranges, or counts of distinct classes) are:

| column | dtype | observed values |
|---|---|---|
| commit | string | length 40 |
| old_file | string | lengths 4–118 |
| new_file | string | lengths 4–118 |
| old_contents | string | lengths 0–2.94k |
| new_contents | string | lengths 1–4.43k |
| subject | string | lengths 15–444 |
| message | string | lengths 16–3.45k |
| lang | string | 1 distinct value |
| license | string | 13 distinct values |
| repos | string | lengths 5–43.2k |
| prompt | string | lengths 17–4.58k |
| response | string | lengths 1–4.43k |
| prompt_tagged | string | lengths 58–4.62k |
| response_tagged | string | lengths 1–4.43k |
| text | string | lengths 132–7.29k |
| text_tagged | string | lengths 173–7.33k |
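As the samples below show, the last six columns are derived from the first ten: `prompt` is `old_contents` followed by the commit message, `response` is a copy of `new_contents`, `text` is the two concatenated, and the `*_tagged` variants wrap the same pieces in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers. A minimal sketch of that assembly, inferred from the samples rather than from any official spec:

```python
def make_text_tagged(old_contents: str, message: str, new_contents: str) -> str:
    # Layout inferred from the rows below: pre-commit file contents, commit
    # message, and post-commit file contents, joined by literal marker tokens.
    return ("<commit_before>" + old_contents
            + "<commit_msg>" + message
            + "<commit_after>" + new_contents)
```

A dataset with this schema can be loaded and inspected with the `datasets` library; the Hub path below is a placeholder, not this dataset's actual identifier:

```python
from datasets import load_dataset

# "user/commit-dataset" is a hypothetical path; substitute the real one.
ds = load_dataset("user/commit-dataset", split="train")
row = ds[0]
print(row["commit"], row["new_file"], row["license"])
print(row["new_contents"][:200])  # first 200 characters of the post-commit file
```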

**commit**: `5f648d6c7f484ae5774d2dbb0a0d3bc7622b3de1`
**old_file / new_file**: `CodeFights/tryFunctions.py`
**new_contents**:

```python
#!/usr/local/bin/python
# Code Fights Try Functions Problem
import math
def tryFunctions(x, functions):
return [eval(func)(x) for func in functions]
def main():
tests = [
[1, ["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
[0.84147, 0.5403, 2, 1]],
[-20, ["abs"], [20]],
[25.5, ["lambda x: int(x)", "int", "math.floor"], [25, 25, 25]],
[3, ["math.factorial", "math.exp", "lambda x: 2 ** x"],
[6, 20.0855369232, 8]],
[-1000, ["lambda z: z", "lambda z: 1.0 * z / 13"],
[-1000, -76.9230769231]]
]
for t in tests:
res = tryFunctions(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: tryFunctions({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: tryFunctions({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
```

**subject / message**: Solve Code Fights try functions problem
**lang**: Python · **license**: mit
**repos**: HKuz/Test_Code

**commit**: `701238e19f4eaa6ce1f1c14e6e56d9544e402ed7`
**old_file / new_file**: `test/test_language.py`
**new_contents**:

```python
import unittest
from charset_normalizer.normalizer import CharsetNormalizerMatches as CnM
from glob import glob
from os.path import basename
class TestLanguageDetection(unittest.TestCase):
SHOULD_BE = {
'sample.1.ar.srt': 'Arabic',
'sample.1.fr.srt': 'French',
'sample.1.gr.srt': 'Greek',
'sample.1.he.srt': 'Hebrew',
'sample.1.hi.srt': 'English',
'sample.1.ru.srt': 'Russian',
'sample.1.tu.srt': 'Turkish',
'sample.2.ar.srt': 'Arabic',
'sample.3.ar.srt': 'Arabic',
'sample.4.ar.srt': 'Arabic',
'sample.5.ar.srt': 'Arabic',
'sample-chinese.txt': 'Classical Chinese',
'sample-greek.txt': 'Greek',
'sample-greek-2.txt': 'Greek',
'sample-hebrew.txt': 'English',
'sample-hebrew-2.txt': 'Hebrew',
'sample-hebrew-3.txt': 'Hebrew',
'sample-russian.txt': 'Russian',
'sample-russian-2.txt': 'Russian',
'sample-turkish.txt': 'Turkish',
'sample-korean.txt': 'Korean',
'sample-spanish.txt': 'Spanish',
'sample-bulgarian.txt': 'Bulgarian',
'sample-english.bom.txt': 'English'
}
def test_language_detection(self):
for path_name in glob('./data/*.srt') + glob('./data/*.txt'):
with self.subTest(path_name):
r_ = CnM.from_path(path_name).best().first()
self.assertEqual(
TestLanguageDetection.SHOULD_BE[basename(path_name)],
r_.language
)
if __name__ == '__main__':
unittest.main()
```

**subject / message**: Add test to verify if language was detected properly
**lang**: Python · **license**: mit
**repos**: Ousret/charset_normalizer,ousret/charset_normalizer,Ousret/charset_normalizer,ousret/charset_normalizer

**commit**: `b08fd9c9770f524ec63d92c11905ab8b2f6ef35f`
**old_file / new_file**: `src/midonet/api.py`
**new_contents**:

```python
# Copyright 2012 Midokura Japan KK
class PortType:
MATERIALIZED_BRIDGE = "MaterializedBridge";
MATERIALIZED_ROUTER = "MaterializedRouter";
LOGICAL_BRIDGE = "LogicalBridge";
LOGICAL_ROUTER = "LogicalRouter";
```

**subject / message**: Add constants for port types
**lang**: Python · **license**: apache-2.0
**repos**: midokura/python-midonetclient,midokura/python-midonetclient,midonet/python-midonetclient,midonet/python-midonetclient

**commit**: `c86f915e324d7e66cb07cbcc9fb827c2dcdeda29`
**old_file / new_file**: `rst2pdf/utils.py`
**old_contents**:

```python
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import shlex
from reportlab.platypus import Spacer
from flowables import *
def parseRaw(data):
"""Parse and process a simple DSL to handle creation of flowables.
Supported (can add others on request):
* PageBreak
* Spacer width, height
"""
elements = []
lines = data.splitlines()
for line in lines:
lexer = shlex.shlex(line)
lexer.whitespace += ','
tokens = list(lexer)
command = tokens[0]
if command == 'PageBreak':
if len(tokens) == 1:
elements.append(MyPageBreak())
else:
elements.append(MyPageBreak(tokens[1]))
if command == 'Spacer':
elements.append(Spacer(int(tokens[1]), int(tokens[2])))
if command == 'Transition':
elements.append(Transition(*tokens[1:]))
return elements
# Looks like this is not used anywhere now:
# def depth(node):
# if node.parent == None:
# return 0
# else:
# return 1 + depth(node.parent)
```

**new_contents**:

```python
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
import shlex
from reportlab.platypus import Spacer
from flowables import *
from styles import adjustUnits
def parseRaw(data):
"""Parse and process a simple DSL to handle creation of flowables.
Supported (can add others on request):
* PageBreak
* Spacer width, height
"""
elements = []
lines = data.splitlines()
for line in lines:
lexer = shlex.shlex(line)
lexer.whitespace += ','
tokens = list(lexer)
command = tokens[0]
if command == 'PageBreak':
if len(tokens) == 1:
elements.append(MyPageBreak())
else:
elements.append(MyPageBreak(tokens[1]))
if command == 'Spacer':
elements.append(Spacer(adjustUnits(tokens[1]),
adjustUnits(tokens[2])))
if command == 'Transition':
elements.append(Transition(*tokens[1:]))
return elements
# Looks like this is not used anywhere now:
# def depth(node):
# if node.parent == None:
# return 0
# else:
# return 1 + depth(node.parent)
```

**subject**: Add unit support for spacers
**message**: Add unit support for spacers (git-svn-id: 305ad3fa995f01f9ce4b4f46c2a806ba00a97020@779 3777fadb-0f44-0410-9e7f-9d8fa6171d72)
**lang**: Python · **license**: mit
**repos**: aquavitae/rst2pdf,aquavitae/rst2pdf,sychen/rst2pdf,aquavitae/rst2pdf-py3-dev,tonioo/rst2pdf,tonioo/rst2pdf,openpolis/rst2pdf-patched-docutils-0.8,aquavitae/rst2pdf-py3-dev,sychen/rst2pdf,openpolis/rst2pdf-patched-docutils-0.8

**commit**: `9dcfd729c9f71794b4a6de649fed92365595034f`
**old_file / new_file**: `tests/gl_test_2.py`
**new_contents**:

```python
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
```

**subject / message**: Test two windows drawing GL with different contexts.
**lang**: Python · **license**: bsd-3-clause
**repos**: mammadori/pyglet,theblacklion/pyglet,niklaskorz/pyglet,theblacklion/pyglet,adamlwgriffiths/Pyglet,seeminglee/pyglet64,theblacklion/pyglet,adamlwgriffiths/Pyglet,oktayacikalin/pyglet,niklaskorz/pyglet,seeminglee/pyglet64,mammadori/pyglet,mammadori/pyglet,adamlwgriffiths/Pyglet,oktayacikalin/pyglet,niklaskorz/pyglet,oktayacikalin/pyglet,oktayacikalin/pyglet,oktayacikalin/pyglet,seeminglee/pyglet64,theblacklion/pyglet,theblacklion/pyglet,niklaskorz/pyglet,mammadori/pyglet,adamlwgriffiths/Pyglet

**commit**: `6c21b012c8ee8f4bb3f989c999f7d85ad99878b4`
**old_file / new_file**: `compressImg2TrainData.py`
**new_contents**:

```python
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Directory structure
TRAIN_DIR:
label0:
img0001.png
img0002.png
img0003.png
label1:
img0001.png
img0002.png
.
.
.
label9:
img0001.png
'''
import cv2, os
import numpy as np
LABEL_MAGIC_NUMBER = 2049
IMAGE_MAGIC_NUMBER = 2051
TRAIN_DIR = "data"
def _make32(val):
# Big endian
return [val >> i & 0xff for i in [24,16,8,0]]
img_data = []
data_label = []
data_size = {}
for dirname in next(os.walk(TRAIN_DIR))[1]:
data_label.append(dirname)
files = next(os.walk(TRAIN_DIR + "/" + dirname))[2]
data_size[dirname] = len(files)
for filename in files:
img_file = TRAIN_DIR + "/" + dirname + "/" + filename
#print(img_file)
img = cv2.imread(img_file)
img = cv2.resize(img, (28, 28))
imgg = cv2.cvtColor(img, cv2.cv.CV_BGR2GRAY)
img_data = np.r_[img_data, imgg[:,:].reshape(imgg.size)]
# make a train label data
# make header
ldata = _make32(LABEL_MAGIC_NUMBER)
ldata = np.r_[ldata, _make32(sum(data_size.values()))]
# write value
for i,v in enumerate(data_label):
ldata = np.r_[ldata, [i]*data_size[v]]
np.array(ldata, dtype=np.uint8).tofile(TRAIN_DIR + "/labels.idx1-ubyte")
# make a train image data
# make header
idata = _make32(IMAGE_MAGIC_NUMBER)
idata = np.r_[idata, _make32(sum(data_size.values()))]
idata = np.r_[idata, _make32(28)]
idata = np.r_[idata, _make32(28)]
idata = np.r_[idata, img_data]
np.array(idata, dtype=np.uint8).tofile(TRAIN_DIR + "/images.idx3-ubyte")
with open(TRAIN_DIR + "/label_name.txt", 'w') as f:
f.write(",".join(["\"" + x + "\"" for x in data_label]))
```

**subject / message**: Add a utility file to make a train data for MNIST sample.
**lang**: Python · **license**: apache-2.0
**repos**: yoneken/train_tf

**commit**: `c613bd3995344bbb164a8f64b39c3a94f6b3ce48`
**old_file / new_file**: `begood_sites/management/commands/fix_root_site_id.py`
**new_contents**:

```python
# coding=utf-8
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
class Command(BaseCommand):
args = '<id_or_name>'
help = 'Fix any broken article urls due to wrong root site id'
def handle(self, *args, **options):
id_or_name = args[0]
try:
site = Site.objects.get(id=int(id_or_name))
except (Site.DoesNotExist, ValueError):
try:
site = Site.objects.get(domain=id_or_name)
except Site.DoesNotExist:
try:
site = Site.objects.get(name=id_or_name)
except Site.DoesNotExist:
raise CommandError('No such site: %s' % id_or_name)
root_site = site.settings.root_site
# Remove any duplicate urls
from begood.models import ArticleUrl
on_new = ArticleUrl.objects.filter(article__sites=site).filter(site=root_site).values_list('article_id', flat=True)
ArticleUrl.objects.filter(article_id__in=on_new).exclude(site=root_site).delete()
# Update all urls to the new root site
ArticleUrl.objects.filter(article__sites=site).update(site=site)
```

**subject / message**: Add a management command to fix broken article urls when changing root site id.
**lang**: Python · **license**: mit
**repos**: AGoodId/begood-sites

**commit**: `a9d5ac5a3ed0d43dfa3a7f7034d7f33771263f91`
**old_file / new_file**: `tests/unit/utils/format_call_test.py`
**new_contents**:

```python
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.unit.utils.format_call_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test `salt.utils.format_call`
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import format_call
from salt.exceptions import SaltInvocationError
class TestFormatCall(TestCase):
def test_simple_args_passing(self):
def foo(one, two=2, three=3):
pass
self.assertEqual(
format_call(foo, dict(one=10, two=20, three=30)),
{'args': [10], 'kwargs': dict(two=20, three=30)}
)
self.assertEqual(
format_call(foo, dict(one=10, two=20)),
{'args': [10], 'kwargs': dict(two=20, three=3)}
)
self.assertEqual(
format_call(foo, dict(one=2)),
{'args': [2], 'kwargs': dict(two=2, three=3)}
)
def test_mimic_typeerror_exceptions(self):
def foo(one, two=2, three=3):
pass
def foo2(one, two, three=3):
pass
with self.assertRaisesRegexp(
SaltInvocationError,
r'foo takes at least 1 argument \(0 given\)'):
format_call(foo, dict(two=3))
with self.assertRaisesRegexp(
TypeError,
r'foo2 takes at least 2 arguments \(1 given\)'):
format_call(foo2, dict(one=1))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestFormatCall, needs_daemon=False)
```

**subject / message**: Add unit tests to `salt.utils.format_call()`.
**lang**: Python · **license**: apache-2.0
**repos**: saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt

**commit**: `67d3a8d5716e9c634c70388361a2fc0edbc80961`
**old_file / new_file**: `old/request.py`
**new_contents**:

```python
from exchanges import helpers
from exchanges import bitfinex
from exchanges import bitstamp
from exchanges import okcoin
from exchanges import cex
from exchanges import btce
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell computer where to put CSV
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up, the w means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime', 'bitstamp_price', 'bitstamp_bid', 'bitstamp_ask', 'bitfinex_price', 'bitfinex_bid', 'bitfinex_ask', 'okcoin_price', 'okcoin_bid', 'okcoin_ask', 'cex_price', 'cex_bid', 'cex_ask', 'btc-e_price', 'btc-e_bid', 'btc_ask']
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
#set a counter telling us how many times we've gone through the loop, this is the first time, so we'll set it at 1
i = 1
#loop through pages of JSON returned, 100 is an arbitrary number
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
print i
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
row.append(bitstamp.get_current_price())
row.append(bitstamp.get_current_bid())
row.append(bitstamp.get_current_ask())
row.append(bitfinex.get_current_price())
row.append(bitfinex.get_current_bid())
row.append(bitfinex.get_current_ask())
row.append(okcoin.get_current_price())
row.append(okcoin.get_current_bid())
row.append(okcoin.get_current_ask())
row.append(cex.get_current_price())
row.append(cex.get_current_bid())
row.append(cex.get_current_ask())
row.append(btce.get_current_price())
row.append(btce.get_current_ask())
row.append(btce.get_current_bid())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5)
```

**subject**: Move original to this folder
**message**: Move original to this folder
Original was pulling back USD exchanges - now only looking at EUR exchanges
**lang**: Python · **license**: mit
**repos**: Humantrashcan/prices
Move original to this folder
Original was pulling back USD exchanges - now only looking at EUR
exchanges
|
from exchanges import helpers
from exchanges import bitfinex
from exchanges import bitstamp
from exchanges import okcoin
from exchanges import cex
from exchanges import btce
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell computer where to put CSV
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up, the w means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime', 'bitstamp_price', 'bitstamp_bid', 'bitstamp_ask', 'bitfinex_price', 'bitfinex_bid', 'bitfinex_ask', 'okcoin_price', 'okcoin_bid', 'okcoin_ask', 'cex_price', 'cex_bid', 'cex_ask', 'btc-e_price', 'btc-e_bid', 'btc-e_ask']
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
#set a counter telling us how many times we've gone through the loop, this is the first time, so we'll set it at 1
i = 1
#poll the exchanges 200 times; the cap on the while loop below is arbitrary
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
print i
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
row.append(bitstamp.get_current_price())
row.append(bitstamp.get_current_bid())
row.append(bitstamp.get_current_ask())
row.append(bitfinex.get_current_price())
row.append(bitfinex.get_current_bid())
row.append(bitfinex.get_current_ask())
row.append(okcoin.get_current_price())
row.append(okcoin.get_current_bid())
row.append(okcoin.get_current_ask())
row.append(cex.get_current_price())
row.append(cex.get_current_bid())
row.append(cex.get_current_ask())
row.append(btce.get_current_price())
row.append(btce.get_current_bid())
row.append(btce.get_current_ask())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5)
|
<commit_before><commit_msg>Move original to this folder
Original was pulling back USD exchanges - now only looking at EUR
exchanges<commit_after>
|
from exchanges import helpers
from exchanges import bitfinex
from exchanges import bitstamp
from exchanges import okcoin
from exchanges import cex
from exchanges import btce
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell computer where to put CSV
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up, the w means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime', 'bitstamp_price', 'bitstamp_bid', 'bitstamp_ask', 'bitfinex_price', 'bitfinex_bid', 'bitfinex_ask', 'okcoin_price', 'okcoin_bid', 'okcoin_ask', 'cex_price', 'cex_bid', 'cex_ask', 'btc-e_price', 'btc-e_bid', 'btc-e_ask']
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
#set a counter telling us how many times we've gone through the loop, this is the first time, so we'll set it at 1
i = 1
#poll the exchanges 200 times; the cap on the while loop below is arbitrary
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
print i
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
row.append(bitstamp.get_current_price())
row.append(bitstamp.get_current_bid())
row.append(bitstamp.get_current_ask())
row.append(bitfinex.get_current_price())
row.append(bitfinex.get_current_bid())
row.append(bitfinex.get_current_ask())
row.append(okcoin.get_current_price())
row.append(okcoin.get_current_bid())
row.append(okcoin.get_current_ask())
row.append(cex.get_current_price())
row.append(cex.get_current_bid())
row.append(cex.get_current_ask())
row.append(btce.get_current_price())
row.append(btce.get_current_bid())
row.append(btce.get_current_ask())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5)
|
Move original to this folder
Original was pulling back USD exchanges - now only looking at EUR
exchangesfrom exchanges import helpers
from exchanges import bitfinex
from exchanges import bitstamp
from exchanges import okcoin
from exchanges import cex
from exchanges import btce
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell computer where to put CSV
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up, the w means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime', 'bitstamp_price', 'bitstamp_bid', 'bitstamp_ask', 'bitfinex_price', 'bitfinex_bid', 'bitfinex_ask', 'okcoin_price', 'okcoin_bid', 'okcoin_ask', 'cex_price', 'cex_bid', 'cex_ask', 'btc-e_price', 'btc-e_bid', 'btc-e_ask']
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
#set a counter telling us how many times we've gone through the loop, this is the first time, so we'll set it at 1
i = 1
#poll the exchanges 200 times; the cap on the while loop below is arbitrary
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
print i
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
row.append(bitstamp.get_current_price())
row.append(bitstamp.get_current_bid())
row.append(bitstamp.get_current_ask())
row.append(bitfinex.get_current_price())
row.append(bitfinex.get_current_bid())
row.append(bitfinex.get_current_ask())
row.append(okcoin.get_current_price())
row.append(okcoin.get_current_bid())
row.append(okcoin.get_current_ask())
row.append(cex.get_current_price())
row.append(cex.get_current_bid())
row.append(cex.get_current_ask())
row.append(btce.get_current_price())
row.append(btce.get_current_bid())
row.append(btce.get_current_ask())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5)
|
<commit_before><commit_msg>Move original to this folder
Original was pulling back USD exchanges - now only looking at EUR
exchanges<commit_after>from exchanges import helpers
from exchanges import bitfinex
from exchanges import bitstamp
from exchanges import okcoin
from exchanges import cex
from exchanges import btce
from time import sleep
from datetime import datetime
import csv
# PREPARE OUTPUT FILE
# tell computer where to put CSV
filename = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
outfile_path='csvoutput'+filename+'.csv'
# open it up, the w means we will write to it
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['datetime', 'bitstamp_price', 'bitstamp_bid', 'bitstamp_ask', 'bitfinex_price', 'bitfinex_bid', 'bitfinex_ask', 'okcoin_price', 'okcoin_bid', 'okcoin_ask', 'cex_price', 'cex_bid', 'cex_ask', 'btc-e_price', 'btc-e_bid', 'btc-e_ask']
#write the row of headings to our CSV file
writer.writerow(headers)
# GET DATA, PUT INTO FILE - LOOP FOR A CERTAIN TIME
#set a counter telling us how many times we've gone through the loop, this is the first time, so we'll set it at 1
i = 1
#poll the exchanges 200 times; the cap on the while loop below is arbitrary
while i < 200:
#print out what number loop we are on, which will make it easier to track down problems when they appear
print i
#initialize the row
row = []
#add every 'cell' to the row list, identifying the item just like an index in a list
row.append(datetime.now())
row.append(bitstamp.get_current_price())
row.append(bitstamp.get_current_bid())
row.append(bitstamp.get_current_ask())
row.append(bitfinex.get_current_price())
row.append(bitfinex.get_current_bid())
row.append(bitfinex.get_current_ask())
row.append(okcoin.get_current_price())
row.append(okcoin.get_current_bid())
row.append(okcoin.get_current_ask())
row.append(cex.get_current_price())
row.append(cex.get_current_bid())
row.append(cex.get_current_ask())
row.append(btce.get_current_price())
row.append(btce.get_current_bid())
row.append(btce.get_current_ask())
#once you have all the cells in there, write the row to your csv
writer.writerow(row)
#increment our loop counter, now we're on the next time through the loop
i = i + 1
#tell Python to rest for 5 secs, so we don't exceed our rate limit
sleep(5)
|
|
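A hedged Python 3 sketch of the same polling loop, hardened a little: the file handle is managed by a with-block, rows are built with csv.DictWriter so the column order cannot drift from the headers, and a failed exchange call records an empty cell instead of killing the run. It assumes the same exchanges API used in the record above (get_current_price/bid/ask on each module).
import csv
from datetime import datetime
from time import sleep

from exchanges import bitstamp, bitfinex, okcoin, cex, btce

EXCHANGES = {'bitstamp': bitstamp, 'bitfinex': bitfinex,
             'okcoin': okcoin, 'cex': cex, 'btc-e': btce}
FIELDS = ['datetime'] + ['%s_%s' % (name, col)
                         for name in EXCHANGES
                         for col in ('price', 'bid', 'ask')]

outfile_path = 'csvoutput%s.csv' % datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
with open(outfile_path, 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=FIELDS)
    writer.writeheader()
    for i in range(200):
        row = {'datetime': datetime.now()}
        for name, mod in EXCHANGES.items():
            for col, getter in (('price', mod.get_current_price),
                                ('bid', mod.get_current_bid),
                                ('ask', mod.get_current_ask)):
                try:
                    row['%s_%s' % (name, col)] = getter()
                except Exception:
                    row['%s_%s' % (name, col)] = ''  # keep the row usable
        writer.writerow(row)
        sleep(5)  # stay under the exchanges' rate limits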
80dd6f7c2b3b16e638ab5d836758d5b60c8a82d5
|
distutils/tests/test_ccompiler.py
|
distutils/tests/test_ccompiler.py
|
from distutils import ccompiler
def test_set_include_dirs(tmp_path):
"""
Extensions should build even if set_include_dirs is invoked.
In particular, compiler-specific paths should not be overridden.
"""
c_file = tmp_path / 'foo.c'
c_file.write_text('void PyInit_foo(void) {}\n')
compiler = ccompiler.new_compiler()
compiler.set_include_dirs([])
compiler.compile([c_file])
|
Add test capturing failed expectation.
|
Add test capturing failed expectation.
|
Python
|
mit
|
pypa/setuptools,pypa/setuptools,pypa/setuptools
|
Add test capturing failed expectation.
|
from distutils import ccompiler
def test_set_include_dirs(tmp_path):
"""
Extensions should build even if set_include_dirs is invoked.
In particular, compiler-specific paths should not be overridden.
"""
c_file = tmp_path / 'foo.c'
c_file.write_text('void PyInit_foo(void) {}\n')
compiler = ccompiler.new_compiler()
compiler.set_include_dirs([])
compiler.compile([c_file])
|
<commit_before><commit_msg>Add test capturing failed expectation.<commit_after>
|
from distutils import ccompiler
def test_set_include_dirs(tmp_path):
"""
Extensions should build even if set_include_dirs is invoked.
In particular, compiler-specific paths should not be overridden.
"""
c_file = tmp_path / 'foo.c'
c_file.write_text('void PyInit_foo(void) {}\n')
compiler = ccompiler.new_compiler()
compiler.set_include_dirs([])
compiler.compile([c_file])
|
Add test capturing failed expectation.
from distutils import ccompiler
def test_set_include_dirs(tmp_path):
"""
Extensions should build even if set_include_dirs is invoked.
In particular, compiler-specific paths should not be overridden.
"""
c_file = tmp_path / 'foo.c'
c_file.write_text('void PyInit_foo(void) {}\n')
compiler = ccompiler.new_compiler()
compiler.set_include_dirs([])
compiler.compile([c_file])
|
<commit_before><commit_msg>Add test capturing failed expectation.<commit_after>
from distutils import ccompiler
def test_set_include_dirs(tmp_path):
"""
Extensions should build even if set_include_dirs is invoked.
In particular, compiler-specific paths should not be overridden.
"""
c_file = tmp_path / 'foo.c'
c_file.write_text('void PyInit_foo(void) {}\n')
compiler = ccompiler.new_compiler()
compiler.set_include_dirs([])
compiler.compile([c_file])
|
|
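A hedged workaround sketch for the expectation the test above encodes: snapshot the compiler-specific include dirs before calling set_include_dirs() so they are not clobbered. add_include_dir() is a real CCompiler method; the extra path is a made-up example, and this is an illustration, not the fix distutils actually shipped.
from distutils import ccompiler

compiler = ccompiler.new_compiler()
builtin_dirs = list(compiler.include_dirs)   # compiler-specific defaults
compiler.set_include_dirs(builtin_dirs)      # reset without losing them
compiler.add_include_dir('/opt/myproject/include')  # then add our own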
8d9f8277e5d346512f1ecc4fd0be0f757c6dfae9
|
ariia/weather_module.py
|
ariia/weather_module.py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Maxime Busy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class WeatherModule:
"""
Class monitoring the weather, and retrieving the weather data thanks to
pyowm
"""
def __init__(self):
"""
Constructor
"""
pass
|
Add the weather module class
|
Add the weather module class
|
Python
|
mit
|
Pandhariix/ARIIA
|
Add the weather module class
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Maxime Busy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class WeatherModule:
"""
Class monitoring the weather, and retrieving the weather data thanks to
pyowm
"""
def __init__(self):
"""
Constructor
"""
pass
|
<commit_before><commit_msg>Add the weather module class<commit_after>
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Maxime Busy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class WeatherModule:
"""
Class monitoring the weather, and retrieving the weather data thanks to
pyowm
"""
def __init__(self):
"""
Constructor
"""
pass
|
Add the weather module class# -*- coding: utf-8 -*-
# !/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Maxime Busy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class WeatherModule:
"""
Class monitoring the weather, and retrieving the weather data thanks to
pyowm
"""
def __init__(self):
"""
Constructor
"""
pass
|
<commit_before><commit_msg>Add the weather module class<commit_after># -*- coding: utf-8 -*-
# !/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Maxime Busy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class WeatherModule:
"""
Class monitoring the weather, and retrieving the weather data thanks to
pyowm
"""
def __init__(self):
"""
Constructor
"""
pass
|
|
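Since the docstring names pyowm as the backend, here is a hedged sketch of what the class might grow into. The method names follow pyowm's 2.x interface (newer releases moved them under owm.weather_manager()), and the API key and city are placeholders.
import pyowm

class WeatherModuleSketch(object):
    def __init__(self, apiKey):
        self.owm = pyowm.OWM(apiKey)

    def getTemperature(self, place):
        observation = self.owm.weather_at_place(place)
        weather = observation.get_weather()
        return weather.get_temperature('celsius')['temp']

# module = WeatherModuleSketch('your-owm-api-key')
# print(module.getTemperature('Paris,FR'))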
7d82be33689d5734fd5ec247dafac5c58536e3c9
|
tests/actor_test.py
|
tests/actor_test.py
|
import unittest
from pykka import Actor
class ActorInterruptTest(unittest.TestCase):
def setUp(self):
class ActorWithInterrupt(Actor):
def run_inside_try(self):
raise KeyboardInterrupt
self.actor = ActorWithInterrupt()
def test_issuing_keyboard_interrupt_stops_process(self):
try:
self.actor.run()
self.fail('Should throw SystemExit exception')
except SystemExit:
pass
|
Test clean exit at keyboard interrupt
|
Test clean exit at keyboard interrupt
|
Python
|
apache-2.0
|
jodal/pykka,tempbottle/pykka,tamland/pykka
|
Test clean exit at keyboard interrupt
|
import unittest
from pykka import Actor
class ActorInterruptTest(unittest.TestCase):
def setUp(self):
class ActorWithInterrupt(Actor):
def run_inside_try(self):
raise KeyboardInterrupt
self.actor = ActorWithInterrupt()
def test_issuing_keyboard_interrupt_stops_process(self):
try:
self.actor.run()
self.fail('Should throw SystemExit exception')
except SystemExit:
pass
|
<commit_before><commit_msg>Test clean exit at keyboard interrupt<commit_after>
|
import unittest
from pykka import Actor
class ActorInterruptTest(unittest.TestCase):
def setUp(self):
class ActorWithInterrupt(Actor):
def run_inside_try(self):
raise KeyboardInterrupt
self.actor = ActorWithInterrupt()
def test_issuing_keyboard_interrupt_stops_process(self):
try:
self.actor.run()
self.fail('Should throw SystemExit exception')
except SystemExit:
pass
|
Test clean exit at keyboard interruptimport unittest
from pykka import Actor
class ActorInterruptTest(unittest.TestCase):
def setUp(self):
class ActorWithInterrupt(Actor):
def run_inside_try(self):
raise KeyboardInterrupt
self.actor = ActorWithInterrupt()
def test_issuing_keyboard_interrupt_stops_process(self):
try:
self.actor.run()
self.fail('Should throw SystemExit exception')
except SystemExit:
pass
|
<commit_before><commit_msg>Test clean exit at keyboard interrupt<commit_after>import unittest
from pykka import Actor
class ActorInterruptTest(unittest.TestCase):
def setUp(self):
class ActorWithInterrupt(Actor):
def run_inside_try(self):
raise KeyboardInterrupt
self.actor = ActorWithInterrupt()
def test_issuing_keyboard_interrupt_stops_process(self):
try:
self.actor.run()
self.fail('Should throw SystemExit exception')
except SystemExit:
pass
|
|
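A minimal sketch (an assumption, not pykka's real code) of the behaviour the test above pins down: a run loop that converts a KeyboardInterrupt raised inside its try block into a clean SystemExit.
class SketchActor(object):
    def run_inside_try(self):
        pass  # subclasses do their message handling here

    def run(self):
        try:
            self.run_inside_try()
        except KeyboardInterrupt:
            raise SystemExit()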
c212216b98d3323073b0c2f8d71a2d4543abfcc2
|
plots/plot-timing-histograms.py
|
plots/plot-timing-histograms.py
|
#!/usr/bin/env python
import climate
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
def diffs(t):
t.load()
return 1000 * np.diff(t.index.values)
def main(root, pattern='*'):
trials = lmj.cubes.Experiment(root).trials_matching(pattern)
values = joblib.Parallel(-1)(joblib.delayed(diffs)(t) for t in trials)
values = [x for xs in values for x in xs]
with lmj.plot.axes(spines=True, max_xticks=6, max_yticks=6) as ax:
ax.hist(values, bins=np.linspace(5, 15, 127), lw=0, stacked=True)
ax.set_xlim(5, 15)
ax.set_xlabel('Time Between Frames (msec)')
ax.set_ylabel('Number of Observations')
if __name__ == '__main__':
climate.call(main)
|
Add script for plotting time interval hists.
|
Add script for plotting time interval hists.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add script for plotting time interval hists.
|
#!/usr/bin/env python
import climate
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
def diffs(t):
t.load()
return 1000 * np.diff(t.index.values)
def main(root, pattern='*'):
trials = lmj.cubes.Experiment(root).trials_matching(pattern)
values = joblib.Parallel(-1)(joblib.delayed(diffs)(t) for t in trials)
values = [x for xs in values for x in xs]
with lmj.plot.axes(spines=True, max_xticks=6, max_yticks=6) as ax:
ax.hist(values, bins=np.linspace(5, 15, 127), lw=0, stacked=True)
ax.set_xlim(5, 15)
ax.set_xlabel('Time Between Frames (msec)')
ax.set_ylabel('Number of Observations')
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for plotting time interval hists.<commit_after>
|
#!/usr/bin/env python
import climate
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
def diffs(t):
t.load()
return 1000 * np.diff(t.index.values)
def main(root, pattern='*'):
trials = lmj.cubes.Experiment(root).trials_matching(pattern)
values = joblib.Parallel(-1)(joblib.delayed(diffs)(t) for t in trials)
values = [x for xs in values for x in xs]
with lmj.plot.axes(spines=True, max_xticks=6, max_yticks=6) as ax:
ax.hist(values, bins=np.linspace(5, 15, 127), lw=0, stacked=True)
ax.set_xlim(5, 15)
ax.set_xlabel('Time Between Frames (msec)')
ax.set_ylabel('Number of Observations')
if __name__ == '__main__':
climate.call(main)
|
Add script for plotting time interval hists.#!/usr/bin/env python
import climate
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
def diffs(t):
t.load()
return 1000 * np.diff(t.index.values)
def main(root, pattern='*'):
trials = lmj.cubes.Experiment(root).trials_matching(pattern)
values = joblib.Parallel(-1)(joblib.delayed(diffs)(t) for t in trials)
values = [x for xs in values for x in xs]
with lmj.plot.axes(spines=True, max_xticks=6, max_yticks=6) as ax:
ax.hist(values, bins=np.linspace(5, 15, 127), lw=0, stacked=True)
ax.set_xlim(5, 15)
ax.set_xlabel('Time Between Frames (msec)')
ax.set_ylabel('Number of Observations')
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for plotting time interval hists.<commit_after>#!/usr/bin/env python
import climate
import joblib
import lmj.cubes
import lmj.plot
import numpy as np
def diffs(t):
t.load()
return 1000 * np.diff(t.index.values)
def main(root, pattern='*'):
trials = lmj.cubes.Experiment(root).trials_matching(pattern)
values = joblib.Parallel(-1)(joblib.delayed(diffs)(t) for t in trials)
values = [x for xs in values for x in xs]
with lmj.plot.axes(spines=True, max_xticks=6, max_yticks=6) as ax:
ax.hist(values, bins=np.linspace(5, 15, 127), lw=0, stacked=True)
ax.set_xlim(5, 15)
ax.set_xlabel('Time Between Frames (msec)')
ax.set_ylabel('Number of Observations')
if __name__ == '__main__':
climate.call(main)
|
|
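The 1000 * np.diff(...) in the record above assumes the trial index stores seconds as floats. A hedged sketch of the same conversion for a datetime64 index, where the unit has to be made explicit:
import numpy as np

idx = np.array(['2015-01-01T00:00:00.000', '2015-01-01T00:00:00.010',
                '2015-01-01T00:00:00.021'], dtype='datetime64[ms]')
msec = np.diff(idx).astype('timedelta64[ms]').astype(float)
print(msec)  # [10. 11.]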
f71045f6bef5c8b9f7274ec41a965ccbe1044a01
|
examples/test_markers.py
|
examples/test_markers.py
|
""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m xkcd_code # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.xkcd_code # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
|
""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m marker3 # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.marker3 # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
|
Update pytest marker test suite
|
Update pytest marker test suite
|
Python
|
mit
|
mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase
|
""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m xkcd_code # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.xkcd_code # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
Update pytest marker test suite
|
""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m marker3 # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.marker3 # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
|
<commit_before>""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m xkcd_code # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.xkcd_code # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
<commit_msg>Update pytest marker test suite<commit_after>
|
""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m marker3 # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.marker3 # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
|
""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m xkcd_code # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.xkcd_code # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
Update pytest marker test suite""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m marker3 # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.marker3 # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
|
<commit_before>""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m xkcd_code # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.xkcd_code # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
<commit_msg>Update pytest marker test suite<commit_after>""" These tests demonstrate pytest marker use for finding and running tests.
Usage examples from this file:
pytest -v -m marker_test_suite # Runs A, B, C, D
pytest -v -m marker1 # Runs A
pytest -v -m marker2 # Runs B, C
pytest -v -m marker3 # Runs C
pytest test_markers.py -v -m "not marker2" # Runs A, D
(The "-v" will display the names of tests as they run.)
(Add "--collect-only" to display names of tests without running them.)
"""
import pytest
from seleniumbase import BaseCase
@pytest.mark.marker_test_suite
class MarkerTestSuite(BaseCase):
@pytest.mark.marker1
def test_A(self):
self.open("https://xkcd.com/1319/")
self.assert_text("Automation", "div#ctitle")
@pytest.mark.marker2
def test_B(self):
self.open("https://www.xkcd.com/1700/")
self.assert_text("New Bug", "div#ctitle")
@pytest.mark.marker2
@pytest.mark.marker3 # Tests can have multiple markers
def test_C(self):
self.open("https://xkcd.com/844/")
self.assert_text("Good Code", "div#ctitle")
def test_D(self):
self.open("https://xkcd.com/2021/")
self.assert_text("Software Development", "div#ctitle")
|
ce5247a01f5ef84336bd81636fb9c04d90de0c12
|
ddm.py
|
ddm.py
|
from common import *
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE, sub
from urlparse import urljoin
cache = Cache()
titlePattern = compile("<TITLE>(.*?)</TITLE>")
contentPattern = compile("(?:<BR>\s+<BLOCKQUOTE>|<H3 ALIGN=\"CENTER\">)(.+)</BLOCKQUOTE>.+?<A HREF=\"ancilpag.html#DP\">Dramatis personae</A>", DOTALL|MULTILINE) # <A HREF=\"[^\"]+\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>" , DOTALL|MULTILINE)
nextPattern = compile("<A HREF=\"([^\"]+)\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>")
volumePattern = compile("<A HREF=\"(v\dcont.html)\"><IMG HEIGHT=\"\d+\" WIDTH=\"\d+\" SRC=\"graphics/[^\"]+\" ALIGN=\"[^\"]+\" BORDER=\"0\" ALT=\"(Volume \d - [^\"]+)\">")
baseURL = "http://www.b5-dark-mirror.co.uk/"
page = cache.get(baseURL, max_age = -1)
data = page.read()
volumes = sorted(volumePattern.findall(data))
for volumeUrl, volumeTitle in volumes:
print volumeTitle
title = "A Dark, Distorted Mirror: " + volumeTitle
toc = tocStart(title)
volumePage = cache.get(urljoin(baseURL, volumeUrl), max_age = -1).read()
splitPattern = compile("<HR><A NAME=\"P[A-Z\d]\"></A>")
chapterPattern = compile("<A HREF=\"(v\dp.+?.html)\">Chapter (\d+)</A>")
partPattern = compile(" SIZE=\"\+1\">(.*?)</FONT>")
for section in splitPattern.split(volumePage):
chapters = chapterPattern.findall(section)
if chapters == []:
continue
part = sub('<[^>]*>', '', partPattern.findall(section)[0])
print "\t", part, chapters
for chapterUrl, _ in chapters:
url = urljoin(baseURL, chapterUrl)
print url
chapterPage = cache.get(url, max_age = -1).read()
content = contentPattern.search(chapterPage).groups()[0]
chapterTitle = titlePattern.search(chapterPage).groups()[0]
generatePage(url, chapterTitle, content, title, toc)
tocEnd(toc)
makeMobi(title, "Gareth Williams")
#break
|
Add specialist creation script for Dark Distorted Mirror
|
Add specialist creation script for Dark Distorted Mirror
|
Python
|
agpl-3.0
|
palfrey/book-blog
|
Add specialist creation script for Dark Distorted Mirror
|
from common import *
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE, sub
from urlparse import urljoin
cache = Cache()
titlePattern = compile("<TITLE>(.*?)</TITLE>")
contentPattern = compile("(?:<BR>\s+<BLOCKQUOTE>|<H3 ALIGN=\"CENTER\">)(.+)</BLOCKQUOTE>.+?<A HREF=\"ancilpag.html#DP\">Dramatis personae</A>", DOTALL|MULTILINE) # <A HREF=\"[^\"]+\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>" , DOTALL|MULTILINE)
nextPattern = compile("<A HREF=\"([^\"]+)\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>")
volumePattern = compile("<A HREF=\"(v\dcont.html)\"><IMG HEIGHT=\"\d+\" WIDTH=\"\d+\" SRC=\"graphics/[^\"]+\" ALIGN=\"[^\"]+\" BORDER=\"0\" ALT=\"(Volume \d - [^\"]+)\">")
baseURL = "http://www.b5-dark-mirror.co.uk/"
page = cache.get(baseURL, max_age = -1)
data = page.read()
volumes = sorted(volumePattern.findall(data))
for volumeUrl, volumeTitle in volumes:
print volumeTitle
title = "A Dark, Distorted Mirror: " + volumeTitle
toc = tocStart(title)
volumePage = cache.get(urljoin(baseURL, volumeUrl), max_age = -1).read()
splitPattern = compile("<HR><A NAME=\"P[A-Z\d]\"></A>")
chapterPattern = compile("<A HREF=\"(v\dp.+?.html)\">Chapter (\d+)</A>")
partPattern = compile(" SIZE=\"\+1\">(.*?)</FONT>")
for section in splitPattern.split(volumePage):
chapters = chapterPattern.findall(section)
if chapters == []:
continue
part = sub('<[^>]*>', '', partPattern.findall(section)[0])
print "\t", part, chapters
for chapterUrl, _ in chapters:
url = urljoin(baseURL, chapterUrl)
print url
chapterPage = cache.get(url, max_age = -1).read()
content = contentPattern.search(chapterPage).groups()[0]
chapterTitle = titlePattern.search(chapterPage).groups()[0]
generatePage(url, chapterTitle, content, title, toc)
tocEnd(toc)
makeMobi(title, "Gareth Williams")
#break
|
<commit_before><commit_msg>Add specialist creation script for Dark Distorted Mirror<commit_after>
|
from common import *
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE, sub
from urlparse import urljoin
cache = Cache()
titlePattern = compile("<TITLE>(.*?)</TITLE>")
contentPattern = compile("(?:<BR>\s+<BLOCKQUOTE>|<H3 ALIGN=\"CENTER\">)(.+)</BLOCKQUOTE>.+?<A HREF=\"ancilpag.html#DP\">Dramatis personae</A>", DOTALL|MULTILINE) # <A HREF=\"[^\"]+\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>" , DOTALL|MULTILINE)
nextPattern = compile("<A HREF=\"([^\"]+)\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>")
volumePattern = compile("<A HREF=\"(v\dcont.html)\"><IMG HEIGHT=\"\d+\" WIDTH=\"\d+\" SRC=\"graphics/[^\"]+\" ALIGN=\"[^\"]+\" BORDER=\"0\" ALT=\"(Volume \d - [^\"]+)\">")
baseURL = "http://www.b5-dark-mirror.co.uk/"
page = cache.get(baseURL, max_age = -1)
data = page.read()
volumes = sorted(volumePattern.findall(data))
for volumeUrl, volumeTitle in volumes:
print volumeTitle
title = "A Dark, Distorted Mirror: " + volumeTitle
toc = tocStart(title)
volumePage = cache.get(urljoin(baseURL, volumeUrl), max_age = -1).read()
splitPattern = compile("<HR><A NAME=\"P[A-Z\d]\"></A>")
chapterPattern = compile("<A HREF=\"(v\dp.+?.html)\">Chapter (\d+)</A>")
partPattern = compile(" SIZE=\"\+1\">(.*?)</FONT>")
for section in splitPattern.split(volumePage):
chapters = chapterPattern.findall(section)
if chapters == []:
continue
part = sub('<[^>]*>', '', partPattern.findall(section)[0])
print "\t", part, chapters
for chapterUrl, _ in chapters:
url = urljoin(baseURL, chapterUrl)
print url
chapterPage = cache.get(url, max_age = -1).read()
content = contentPattern.search(chapterPage).groups()[0]
chapterTitle = titlePattern.search(chapterPage).groups()[0]
generatePage(url, chapterTitle, content, title, toc)
tocEnd(toc)
makeMobi(title, "Gareth Williams")
#break
|
Add specialist creation script for Dark Distorted Mirrorfrom common import *
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE, sub
from urlparse import urljoin
cache = Cache()
titlePattern = compile("<TITLE>(.*?)</TITLE>")
contentPattern = compile("(?:<BR>\s+<BLOCKQUOTE>|<H3 ALIGN=\"CENTER\">)(.+)</BLOCKQUOTE>.+?<A HREF=\"ancilpag.html#DP\">Dramatis personae</A>", DOTALL|MULTILINE) # <A HREF=\"[^\"]+\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>" , DOTALL|MULTILINE)
nextPattern = compile("<A HREF=\"([^\"]+)\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>")
volumePattern = compile("<A HREF=\"(v\dcont.html)\"><IMG HEIGHT=\"\d+\" WIDTH=\"\d+\" SRC=\"graphics/[^\"]+\" ALIGN=\"[^\"]+\" BORDER=\"0\" ALT=\"(Volume \d - [^\"]+)\">")
baseURL = "http://www.b5-dark-mirror.co.uk/"
page = cache.get(baseURL, max_age = -1)
data = page.read()
volumes = sorted(volumePattern.findall(data))
for volumeUrl, volumeTitle in volumes:
print volumeTitle
title = "A Dark, Distorted Mirror: " + volumeTitle
toc = tocStart(title)
volumePage = cache.get(urljoin(baseURL, volumeUrl), max_age = -1).read()
splitPattern = compile("<HR><A NAME=\"P[A-Z\d]\"></A>")
chapterPattern = compile("<A HREF=\"(v\dp.+?.html)\">Chapter (\d+)</A>")
partPattern = compile(" SIZE=\"\+1\">(.*?)</FONT>")
for section in splitPattern.split(volumePage):
chapters = chapterPattern.findall(section)
if chapters == []:
continue
part = sub('<[^>]*>', '', partPattern.findall(section)[0])
print "\t", part, chapters
for chapterUrl, _ in chapters:
url = urljoin(baseURL, chapterUrl)
print url
chapterPage = cache.get(url, max_age = -1).read()
content = contentPattern.search(chapterPage).groups()[0]
chapterTitle = titlePattern.search(chapterPage).groups()[0]
generatePage(url, chapterTitle, content, title, toc)
tocEnd(toc)
makeMobi(title, "Gareth Williams")
#break
|
<commit_before><commit_msg>Add specialist creation script for Dark Distorted Mirror<commit_after>from common import *
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE, sub
from urlparse import urljoin
cache = Cache()
titlePattern = compile("<TITLE>(.*?)</TITLE>")
contentPattern = compile("(?:<BR>\s+<BLOCKQUOTE>|<H3 ALIGN=\"CENTER\">)(.+)</BLOCKQUOTE>.+?<A HREF=\"ancilpag.html#DP\">Dramatis personae</A>", DOTALL|MULTILINE) # <A HREF=\"[^\"]+\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>" , DOTALL|MULTILINE)
nextPattern = compile("<A HREF=\"([^\"]+)\"><IMG SRC=\"graphics/j-gate.jpg\" ALIGN=\"BOTTOM\" BORDER=\"0\" ALT=\"Into jump gate\"></A>")
volumePattern = compile("<A HREF=\"(v\dcont.html)\"><IMG HEIGHT=\"\d+\" WIDTH=\"\d+\" SRC=\"graphics/[^\"]+\" ALIGN=\"[^\"]+\" BORDER=\"0\" ALT=\"(Volume \d - [^\"]+)\">")
baseURL = "http://www.b5-dark-mirror.co.uk/"
page = cache.get(baseURL, max_age = -1)
data = page.read()
volumes = sorted(volumePattern.findall(data))
for volumeUrl, volumeTitle in volumes:
print volumeTitle
title = "A Dark, Distorted Mirror: " + volumeTitle
toc = tocStart(title)
volumePage = cache.get(urljoin(baseURL, volumeUrl), max_age = -1).read()
splitPattern = compile("<HR><A NAME=\"P[A-Z\d]\"></A>")
chapterPattern = compile("<A HREF=\"(v\dp.+?.html)\">Chapter (\d+)</A>")
partPattern = compile(" SIZE=\"\+1\">(.*?)</FONT>")
for section in splitPattern.split(volumePage):
chapters = chapterPattern.findall(section)
if chapters == []:
continue
part = sub('<[^>]*>', '', partPattern.findall(section)[0])
print "\t", part, chapters
for chapterUrl, _ in chapters:
url = urljoin(baseURL, chapterUrl)
print url
chapterPage = cache.get(url, max_age = -1).read()
content = contentPattern.search(chapterPage).groups()[0]
chapterTitle = titlePattern.search(chapterPage).groups()[0]
generatePage(url, chapterTitle, content, title, toc)
tocEnd(toc)
makeMobi(title, "Gareth Williams")
#break
|
|
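nextPattern is compiled in the script above but never used. A hedged sketch of how it could drive a follow-the-jump-gate crawl instead of the per-volume contents pages, reusing the script's cache, baseURL, urljoin, and nextPattern:
def walk_chapters(startUrl):
    url = startUrl
    while True:
        page = cache.get(url, max_age=-1).read()
        yield url, page
        m = nextPattern.search(page)
        if m is None:
            break
        url = urljoin(baseURL, m.group(1))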
6d6a566d93784022e5b769fc9d26b0f56ac3f18d
|
bin/2000/shape_msa_tract.py
|
bin/2000/shape_msa_tract.py
|
"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to tracts crosswalk
#
msa_to_tract = {}
with open('data/2000/crosswalks/tract.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
tract = rows[1]
if msa not in msa_to_tract:
msa_to_tract[msa] = []
msa_to_tract[msa].append(tract)
#
# Perform the extraction
#
for msa in msa_to_tract:
states = list(set([b[:2] for b in msa_to_tract[msa]]))
## Get all tracts
all_tract = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/tracts.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_tract[f['properties']['CTIDFP00']] = f['geometry']
## tracts within this MSA
msa_tract = {tract: all_tract[tract] for tract in msa_to_tract[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
path = 'data/2000/shp/msa/%s/tracts.shp'%msa
schema = {'geometry': 'Polygon',
'properties': {'CTIDFP00': 'str'}}
with fiona.open(path, 'w','ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for tract in msa_tract:
rec = {'geometry':msa_tract[tract], 'properties':{'CTIDFP00':tract}}
output.write(rec)
|
Add script to extract the tracts shapes in 2000 MSA
|
Add script to extract the tracts shapes in 2000 MSA
|
Python
|
bsd-2-clause
|
scities/2000-us-metro-atlas
|
Add script to extract the tracts shapes in 2000 MSA
|
"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to tracts crosswalk
#
msa_to_tract = {}
with open('data/2000/crosswalks/tract.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
tract = rows[1]
if msa not in msa_to_tract:
msa_to_tract[msa] = []
msa_to_tract[msa].append(tract)
#
# Perform the extraction
#
for msa in msa_to_tract:
states = list(set([b[:2] for b in msa_to_tract[msa]]))
## Get all tracts
all_tract = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/tracts.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_tract[f['properties']['CTIDFP00']] = f['geometry']
## tracts within this MSA
msa_tract = {tract: all_tract[tract] for tract in msa_to_tract[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
path = 'data/2000/shp/msa/%s/tracts.shp'%msa
schema = {'geometry': 'Polygon',
'properties': {'CTIDFP00': 'str'}}
with fiona.open(path, 'w','ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for tract in msa_tract:
rec = {'geometry':msa_tract[tract], 'properties':{'CTIDFP00':tract}}
output.write(rec)
|
<commit_before><commit_msg>Add script to extract the tracts shapes in 2000 MSA<commit_after>
|
"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to tracts crosswalk
#
msa_to_tract = {}
with open('data/2000/crosswalks/tract.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
tract = rows[1]
if msa not in msa_to_tract:
msa_to_tract[msa] = []
msa_to_tract[msa].append(tract)
#
# Perform the extraction
#
for msa in msa_to_tract:
states = list(set([b[:2] for b in msa_to_tract[msa]]))
## Get all tracts
all_tract = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/tracts.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_tract[f['properties']['CTIDFP00']] = f['geometry']
## tracts within this MSA
msa_tract = {tract: all_tract[tract] for tract in msa_to_tract[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
path = 'data/2000/shp/msa/%s/tracts.shp'%msa
schema = {'geometry': 'Polygon',
'properties': {'CTIDFP00': 'str'}}
with fiona.open(path, 'w','ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for tract in msa_tract:
rec = {'geometry':msa_tract[tract], 'properties':{'CTIDFP00':tract}}
output.write(rec)
|
Add script to extract the tracts shapes in 2000 MSA"""shape_msa_tract.py
Output one shapefile per MSA containing all the census tracts it contains
"""
import os
import csv
import fiona
#
# Import MSA to tracts crosswalk
#
msa_to_tract = {}
with open('data/2000/crosswalks/tract.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
tract = rows[1]
if msa not in msa_to_tract:
msa_to_tract[msa] = []
msa_to_tract[msa].append(tract)
#
# Perform the extraction
#
for msa in msa_to_tract:
states = list(set([b[:2] for b in msa_to_tract[msa]]))
## Get all tracts
all_tract = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/tracts.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_tract[f['properties']['CTIDFP00']] = f['geometry']
## tracts within this MSA
msa_tract = {tract: all_tract[tract] for tract in msa_to_tract[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
path = 'data/2000/shp/msa/%s/tracts.shp'%msa
schema = {'geometry': 'Polygon',
'properties': {'CTIDFP00': 'str'}}
with fiona.open(path, 'w','ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for tract in msa_tract:
rec = {'geometry':msa_tract[tract], 'properties':{'CTIDFP00':tract}}
output.write(rec)
|
<commit_before><commit_msg>Add script to extract the tracts shapes in 2000 MSA<commit_after>"""shape_msa_tract.py
Output one shapefile per MSA containing all the census tracts it contains
"""
import os
import csv
import fiona
#
# Import MSA to tracts crosswalk
#
msa_to_tract = {}
with open('data/2000/crosswalks/tract.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
tract = rows[1]
if msa not in msa_to_tract:
msa_to_tract[msa] = []
msa_to_tract[msa].append(tract)
#
# Perform the extraction
#
for msa in msa_to_tract:
states = list(set([b[:2] for b in msa_to_tract[msa]]))
## Get all tracts
all_tract = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/tracts.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_tract[f['properties']['CTIDFP00']] = f['geometry']
## tracts within this MSA
msa_tract = {tract: all_tract[tract] for tract in msa_to_tract[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
path = 'data/2000/shp/msa/%s/tracts.shp'%msa
schema = {'geometry': 'Polygon',
'properties': {'CTIDFP00': 'str'}}
with fiona.open(path, 'w','ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for tract in msa_tract:
rec = {'geometry':msa_tract[tract], 'properties':{'CTIDFP00':tract}}
output.write(rec)
|
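A hedged Python 3 sketch of the crosswalk load in the record above: defaultdict drops the membership test and next(reader) replaces the Python 2 reader.next(). The path and column positions are taken from the script.
import csv
from collections import defaultdict

msa_to_tract = defaultdict(list)
with open('data/2000/crosswalks/tract.csv') as source:
    reader = csv.reader(source, delimiter='\t')
    next(reader)  # skip the header row
    for row in reader:
        msa_to_tract[row[0]].append(row[1])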
|
72437b0ed11bfdc5ff82aeabf69130e683ddeb43
|
numba/cuda/tests/cudapy/test_array_methods.py
|
numba/cuda/tests/cudapy/test_array_methods.py
|
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
import numpy as np
from numba import cuda
def reinterpret_array_type(byte_arr, start, stop, output):
# Tested with just one thread
val = byte_arr[start:stop].view(np.int32)[0]
output[0] = val
class TestCudaArrayMethods(unittest.TestCase):
def test_reinterpret_array_type(self):
"""
Reinterpret byte array as int32 in the GPU.
"""
pyfunc = reinterpret_array_type
kernel = cuda.jit(pyfunc)
byte_arr = np.arange(256, dtype=np.uint8)
itemsize = np.dtype(np.int32).itemsize
for start in range(0, 256, itemsize):
stop = start + itemsize
expect = byte_arr[start:stop].view(np.int32)[0]
output = np.zeros(1, dtype=np.int32)
kernel[1, 1](byte_arr, start, stop, output)
got = output[0]
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
Add CUDA GPU test for the .view() array method
|
Add CUDA GPU test for the .view() array method
|
Python
|
bsd-2-clause
|
pombredanne/numba,stuartarchibald/numba,GaZ3ll3/numba,pitrou/numba,pombredanne/numba,seibert/numba,cpcloud/numba,gdementen/numba,cpcloud/numba,gmarkall/numba,pitrou/numba,jriehl/numba,IntelLabs/numba,GaZ3ll3/numba,GaZ3ll3/numba,stefanseefeld/numba,stefanseefeld/numba,IntelLabs/numba,IntelLabs/numba,gmarkall/numba,ssarangi/numba,gmarkall/numba,sklam/numba,gmarkall/numba,gdementen/numba,ssarangi/numba,cpcloud/numba,pitrou/numba,sklam/numba,GaZ3ll3/numba,numba/numba,pitrou/numba,stonebig/numba,ssarangi/numba,stefanseefeld/numba,jriehl/numba,IntelLabs/numba,jriehl/numba,stonebig/numba,stuartarchibald/numba,stonebig/numba,stuartarchibald/numba,sklam/numba,jriehl/numba,seibert/numba,ssarangi/numba,pombredanne/numba,seibert/numba,pombredanne/numba,ssarangi/numba,sklam/numba,jriehl/numba,GaZ3ll3/numba,stefanseefeld/numba,numba/numba,seibert/numba,stefanseefeld/numba,stonebig/numba,stonebig/numba,gdementen/numba,gdementen/numba,cpcloud/numba,gmarkall/numba,sklam/numba,cpcloud/numba,numba/numba,stuartarchibald/numba,gdementen/numba,numba/numba,pitrou/numba,numba/numba,stuartarchibald/numba,seibert/numba,pombredanne/numba,IntelLabs/numba
|
Add CUDA GPU test for the .view() array method
|
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
import numpy as np
from numba import cuda
def reinterpret_array_type(byte_arr, start, stop, output):
# Tested with just one thread
val = byte_arr[start:stop].view(np.int32)[0]
output[0] = val
class TestCudaArrayMethods(unittest.TestCase):
def test_reinterpret_array_type(self):
"""
Reinterpret byte array as int32 in the GPU.
"""
pyfunc = reinterpret_array_type
kernel = cuda.jit(pyfunc)
byte_arr = np.arange(256, dtype=np.uint8)
itemsize = np.dtype(np.int32).itemsize
for start in range(0, 256, itemsize):
stop = start + itemsize
expect = byte_arr[start:stop].view(np.int32)[0]
output = np.zeros(1, dtype=np.int32)
kernel[1, 1](byte_arr, start, stop, output)
got = output[0]
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add CUDA GPU test for the .view() array method<commit_after>
|
from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
import numpy as np
from numba import cuda
def reinterpret_array_type(byte_arr, start, stop, output):
# Tested with just one thread
val = byte_arr[start:stop].view(np.int32)[0]
output[0] = val
class TestCudaArrayMethods(unittest.TestCase):
def test_reinterpret_array_type(self):
"""
Reinterpret byte array as int32 in the GPU.
"""
pyfunc = reinterpret_array_type
kernel = cuda.jit(pyfunc)
byte_arr = np.arange(256, dtype=np.uint8)
itemsize = np.dtype(np.int32).itemsize
for start in range(0, 256, itemsize):
stop = start + itemsize
expect = byte_arr[start:stop].view(np.int32)[0]
output = np.zeros(1, dtype=np.int32)
kernel[1, 1](byte_arr, start, stop, output)
got = output[0]
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
Add CUDA GPU test for the .view() array methodfrom __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
import numpy as np
from numba import cuda
def reinterpret_array_type(byte_arr, start, stop, output):
# Tested with just one thread
val = byte_arr[start:stop].view(np.int32)[0]
output[0] = val
class TestCudaArrayMethods(unittest.TestCase):
def test_reinterpret_array_type(self):
"""
Reinterpret byte array as int32 in the GPU.
"""
pyfunc = reinterpret_array_type
kernel = cuda.jit(pyfunc)
byte_arr = np.arange(256, dtype=np.uint8)
itemsize = np.dtype(np.int32).itemsize
for start in range(0, 256, itemsize):
stop = start + itemsize
expect = byte_arr[start:stop].view(np.int32)[0]
output = np.zeros(1, dtype=np.int32)
kernel[1, 1](byte_arr, start, stop, output)
got = output[0]
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add CUDA GPU test for the .view() array method<commit_after>from __future__ import print_function, absolute_import, division
from numba import unittest_support as unittest
import numpy as np
from numba import cuda
def reinterpret_array_type(byte_arr, start, stop, output):
# Tested with just one thread
val = byte_arr[start:stop].view(np.int32)[0]
output[0] = val
class TestCudaArrayMethods(unittest.TestCase):
def test_reinterpret_array_type(self):
"""
Reinterpret byte array as int32 in the GPU.
"""
pyfunc = reinterpret_array_type
kernel = cuda.jit(pyfunc)
byte_arr = np.arange(256, dtype=np.uint8)
itemsize = np.dtype(np.int32).itemsize
for start in range(0, 256, itemsize):
stop = start + itemsize
expect = byte_arr[start:stop].view(np.int32)[0]
output = np.zeros(1, dtype=np.int32)
kernel[1, 1](byte_arr, start, stop, output)
got = output[0]
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
|
|
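The kernel in the record above hinges on NumPy's zero-copy dtype reinterpretation; a minimal host-side sketch of the same operation (assuming a little-endian machine, which the expected values in the test also rely on):

import numpy as np

byte_arr = np.arange(256, dtype=np.uint8)
# Reinterpret bytes 4..7 as one int32 without copying; on little-endian
# hardware the bytes 04 05 06 07 read back as 0x07060504.
val = byte_arr[4:8].view(np.int32)[0]
print(val)  # 117835012 == 0x07060504

The CUDA test then asserts that the same .view() call compiles under cuda.jit and produces identical values inside a kernel.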
0b436265ad984f8750098e56712bb3ac8f917e86
|
scripts/make_segue_subsample.py
|
scripts/make_segue_subsample.py
|
from astropy.io import fits, ascii
from astropy.table import Table
import numpy as np
sspp = fits.open("/Users/adrian/projects/segue-learn/data/ssppOut-dr9.fits")
all_data = sspp[1].data
best_data = all_data[all_data['FLAG'] == 'nnnnn']
best_data = best_data[(best_data['RV_ADOP'] != -9999) & \
(best_data['FEH_ADOP'] != -9999) & \
(best_data['DIST_AP'] != -9999)]
idx = np.random.randint(len(best_data), size=5000)
subsample = best_data[idx]
d = Table(best_data)['RA', 'DEC', 'L', 'B', 'RV_ADOP', 'TEFF_ADOP', 'LOGG_ADOP', 'FEH_ADOP']
ascii.write(d, "segue_sample.csv", delimiter=",")
|
Add script for selecting a segue subsample
|
Add script for selecting a segue subsample
|
Python
|
bsd-3-clause
|
adrn/d3po,adrn/d3po,adrn/d3po
|
Add script for selecting a segue subsample
|
from astropy.io import fits, ascii
from astropy.table import Table
import numpy as np
sspp = fits.open("/Users/adrian/projects/segue-learn/data/ssppOut-dr9.fits")
all_data = sspp[1].data
best_data = all_data[all_data['FLAG'] == 'nnnnn']
best_data = best_data[(best_data['RV_ADOP'] != -9999) & \
(best_data['FEH_ADOP'] != -9999) & \
(best_data['DIST_AP'] != -9999)]
idx = np.random.randint(len(best_data), size=5000)
subsample = best_data[idx]
d = Table(best_data)['RA', 'DEC', 'L', 'B', 'RV_ADOP', 'TEFF_ADOP', 'LOGG_ADOP', 'FEH_ADOP']
ascii.write(d, "segue_sample.csv", delimiter=",")
|
<commit_before><commit_msg>Add script for selecting a segue subsample<commit_after>
|
from astropy.io import fits, ascii
from astropy.table import Table
import numpy as np
sspp = fits.open("/Users/adrian/projects/segue-learn/data/ssppOut-dr9.fits")
all_data = sspp[1].data
best_data = all_data[all_data['FLAG'] == 'nnnnn']
best_data = best_data[(best_data['RV_ADOP'] != -9999) & \
(best_data['FEH_ADOP'] != -9999) & \
(best_data['DIST_AP'] != -9999)]
idx = np.random.randint(len(best_data), size=5000)
subsample = best_data[idx]
d = Table(best_data)['RA', 'DEC', 'L', 'B', 'RV_ADOP', 'TEFF_ADOP', 'LOGG_ADOP', 'FEH_ADOP']
ascii.write(d, "segue_sample.csv", delimiter=",")
|
Add script for selecting a segue subsample
from astropy.io import fits, ascii
from astropy.table import Table
import numpy as np
sspp = fits.open("/Users/adrian/projects/segue-learn/data/ssppOut-dr9.fits")
all_data = sspp[1].data
best_data = all_data[all_data['FLAG'] == 'nnnnn']
best_data = best_data[(best_data['RV_ADOP'] != -9999) & \
(best_data['FEH_ADOP'] != -9999) & \
(best_data['DIST_AP'] != -9999)]
idx = np.random.randint(len(best_data), size=5000)
subsample = best_data[idx]
d = Table(best_data)['RA', 'DEC', 'L', 'B', 'RV_ADOP', 'TEFF_ADOP', 'LOGG_ADOP', 'FEH_ADOP']
ascii.write(d, "segue_sample.csv", delimiter=",")
|
<commit_before><commit_msg>Add script for selecting a segue subsample<commit_after>from astropy.io import fits, ascii
from astropy.table import Table
import numpy as np
sspp = fits.open("/Users/adrian/projects/segue-learn/data/ssppOut-dr9.fits")
all_data = sspp[1].data
best_data = all_data[all_data['FLAG'] == 'nnnnn']
best_data = best_data[(best_data['RV_ADOP'] != -9999) & \
(best_data['FEH_ADOP'] != -9999) & \
(best_data['DIST_AP'] != -9999)]
idx = np.random.randint(len(best_data), size=5000)
subsample = best_data[idx]
d = Table(best_data)['RA', 'DEC', 'L', 'B', 'RV_ADOP', 'TEFF_ADOP', 'LOGG_ADOP', 'FEH_ADOP']
ascii.write(d, "segue_sample.csv", delimiter=",")
|
|
c4ff6052f6e4fd545ea1c7cb0cd2a53a28ed001d
|
scripts/state_and_transition.py
|
scripts/state_and_transition.py
|
#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# States and Transitions
from auto_number import AutoNumber
class STATE(AutoNumber):
Start = ()
Following_waypoint = ()
Avoiding_obstacle = ()
Driving_toward_cone = ()
Driving_away_from_cone = ()
Success = ()
Failure = ()
End = ()
class TRANSITION(AutoNumber):
obstacle_seen = ()
near_cone = ()
obstacle_cleared = ()
touched_cone = ()
passed_cone = ()
segment_timeout = ()
touched_last_cone = ()
passed_last_cone = ()
course_timeout = ()
|
Move state and transition enums file for re-use
|
Move state and transition enums file for re-use
|
Python
|
apache-2.0
|
ProgrammingRobotsStudyGroup/robo_magellan,ProgrammingRobotsStudyGroup/robo_magellan,ProgrammingRobotsStudyGroup/robo_magellan,ProgrammingRobotsStudyGroup/robo_magellan
|
Move state and transition enums file for re-use
|
#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# States and Transitions
from auto_number import AutoNumber
class STATE(AutoNumber):
Start = ()
Following_waypoint = ()
Avoiding_obstacle = ()
Driving_toward_cone = ()
Driving_away_from_cone = ()
Success = ()
Failure = ()
End = ()
class TRANSITION(AutoNumber):
obstacle_seen = ()
near_cone = ()
obstacle_cleared = ()
touched_cone = ()
passed_cone = ()
segment_timeout = ()
touched_last_cone = ()
passed_last_cone = ()
course_timeout = ()
|
<commit_before><commit_msg>Move state and transition enums file for re-use<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# States and Transitions
from auto_number import AutoNumber
class STATE(AutoNumber):
Start = ()
Following_waypoint = ()
Avoiding_obstacle = ()
Driving_toward_cone = ()
Driving_away_from_cone = ()
Success = ()
Failure = ()
End = ()
class TRANSITION(AutoNumber):
obstacle_seen = ()
near_cone = ()
obstacle_cleared = ()
touched_cone = ()
passed_cone = ()
segment_timeout = ()
touched_last_cone = ()
passed_last_cone = ()
course_timeout = ()
|
Move state and transition enums file for re-use
#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# States and Transitions
from auto_number import AutoNumber
class STATE(AutoNumber):
Start = ()
Following_waypoint = ()
Avoiding_obstacle = ()
Driving_toward_cone = ()
Driving_away_from_cone = ()
Success = ()
Failure = ()
End = ()
class TRANSITION(AutoNumber):
obstacle_seen = ()
near_cone = ()
obstacle_cleared = ()
touched_cone = ()
passed_cone = ()
segment_timeout = ()
touched_last_cone = ()
passed_last_cone = ()
course_timeout = ()
|
<commit_before><commit_msg>Move state and transition enums file for re-use<commit_after>#!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# States and Transitions
from auto_number import AutoNumber
class STATE(AutoNumber):
Start = ()
Following_waypoint = ()
Avoiding_obstacle = ()
Driving_toward_cone = ()
Driving_away_from_cone = ()
Success = ()
Failure = ()
End = ()
class TRANSITION(AutoNumber):
obstacle_seen = ()
near_cone = ()
obstacle_cleared = ()
touched_cone = ()
passed_cone = ()
segment_timeout = ()
touched_last_cone = ()
passed_last_cone = ()
course_timeout = ()
|
|
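The record imports AutoNumber from a sibling module that is not part of the commit; presumably it is the AutoNumber recipe from the Python enum documentation, roughly:

from enum import Enum

class AutoNumber(Enum):
    def __new__(cls):
        # Assign the next integer value automatically.
        value = len(cls.__members__) + 1
        obj = object.__new__(cls)
        obj._value_ = value
        return obj

With that definition, each `Name = ()` member invokes __new__ with no arguments and receives the next integer, so STATE.Start.value is 1, STATE.Following_waypoint.value is 2, and so on.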
2b0c7304c1372997bc226d255ef22cc31f56f6fa
|
caminae/core/management.py
|
caminae/core/management.py
|
# http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.
from south.signals import post_migrate
def run_initial_sql(sender, **kwargs):
app_label = kwargs.get('app')
import os
from django.db import connection, transaction, models
app_dir = os.path.normpath(os.path.join(os.path.dirname(
models.get_app(app_label).__file__), 'sql'))
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
os.path.join(app_dir, "%s.sql" % app_label)]
cursor = connection.cursor()
for sql_file in sql_files:
try:
if os.path.exists(sql_file):
print "Loading initial SQL data from '%s'" % sql_file
f = open(sql_file)
sql = f.read()
f.close()
cursor.execute(sql)
except Exception, e:
import sys
sys.stderr.write("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
import traceback
traceback.print_exc()
transaction.rollback_unless_managed()
else:
transaction.commit_unless_managed()
post_migrate.connect(run_initial_sql)
|
Enable auto-loading of raw SQL during South migration
|
Enable auto-loading of raw SQL during South migration
|
Python
|
bsd-2-clause
|
makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,camillemonchicourt/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,camillemonchicourt/Geotrek,makinacorpus/Geotrek,Anaethelion/Geotrek,johan--/Geotrek,mabhub/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,camillemonchicourt/Geotrek,makinacorpus/Geotrek
|
Enable auto-loading of raw SQL during South migration
|
# http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.
from south.signals import post_migrate
def run_initial_sql(sender, **kwargs):
app_label = kwargs.get('app')
import os
from django.db import connection, transaction, models
app_dir = os.path.normpath(os.path.join(os.path.dirname(
models.get_app(app_label).__file__), 'sql'))
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
os.path.join(app_dir, "%s.sql" % app_label)]
cursor = connection.cursor()
for sql_file in sql_files:
try:
if os.path.exists(sql_file):
print "Loading initial SQL data from '%s'" % sql_file
f = open(sql_file)
sql = f.read()
f.close()
cursor.execute(sql)
except Exception, e:
import sys
sys.stderr.write("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
import traceback
traceback.print_exc()
transaction.rollback_unless_managed()
else:
transaction.commit_unless_managed()
post_migrate.connect(run_initial_sql)
|
<commit_before><commit_msg>Enable auto-loading of raw SQL during South migration<commit_after>
|
# http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.
from south.signals import post_migrate
def run_initial_sql(sender, **kwargs):
app_label = kwargs.get('app')
import os
from django.db import connection, transaction, models
app_dir = os.path.normpath(os.path.join(os.path.dirname(
models.get_app(app_label).__file__), 'sql'))
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
os.path.join(app_dir, "%s.sql" % app_label)]
cursor = connection.cursor()
for sql_file in sql_files:
try:
if os.path.exists(sql_file):
print "Loading initial SQL data from '%s'" % sql_file
f = open(sql_file)
sql = f.read()
f.close()
cursor.execute(sql)
except Exception, e:
import sys
sys.stderr.write("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
import traceback
traceback.print_exc()
transaction.rollback_unless_managed()
else:
transaction.commit_unless_managed()
post_migrate.connect(run_initial_sql)
|
Enable auto-loading of raw SQL during South migration
# http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.
from south.signals import post_migrate
def run_initial_sql(sender, **kwargs):
app_label = kwargs.get('app')
import os
from django.db import connection, transaction, models
app_dir = os.path.normpath(os.path.join(os.path.dirname(
models.get_app(app_label).__file__), 'sql'))
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
os.path.join(app_dir, "%s.sql" % app_label)]
cursor = connection.cursor()
for sql_file in sql_files:
try:
if os.path.exists(sql_file):
print "Loading initial SQL data from '%s'" % sql_file
f = open(sql_file)
sql = f.read()
f.close()
cursor.execute(sql)
except Exception, e:
import sys
sys.stderr.write("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
import traceback
traceback.print_exc()
transaction.rollback_unless_managed()
else:
transaction.commit_unless_managed()
post_migrate.connect(run_initial_sql)
|
<commit_before><commit_msg>Enable auto-loading of raw SQL during South migration<commit_after># http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.
from south.signals import post_migrate
def run_initial_sql(sender, **kwargs):
app_label = kwargs.get('app')
import os
from django.db import connection, transaction, models
app_dir = os.path.normpath(os.path.join(os.path.dirname(
models.get_app(app_label).__file__), 'sql'))
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
os.path.join(app_dir, "%s.sql" % app_label)]
cursor = connection.cursor()
for sql_file in sql_files:
try:
if os.path.exists(sql_file):
print "Loading initial SQL data from '%s'" % sql_file
f = open(sql_file)
sql = f.read()
f.close()
cursor.execute(sql)
except Exception, e:
import sys
sys.stderr.write("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
import traceback
traceback.print_exc()
transaction.rollback_unless_managed()
else:
transaction.commit_unless_managed()
post_migrate.connect(run_initial_sql)
|
|
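The snippet is driven purely by a naming convention rather than settings; an illustrative sketch of the candidate paths it derives for a hypothetical app named "core" on the postgresql_psycopg2 backend (the app name and paths here are assumptions for illustration):

import os

app_dir = "/path/to/core/sql"  # the sql/ directory next to the app's models.py
backend_name = "django.db.backends.postgresql_psycopg2".split(".")[-1]
sql_files = [
    os.path.join(app_dir, "core.%s.sql" % backend_name),  # core.postgresql_psycopg2.sql
    os.path.join(app_dir, "core.sql"),  # generic SQL, also executed if present
]
print(sql_files)

Because both files run on every post_migrate signal, the SQL itself needs to be written idempotently (CREATE OR REPLACE and the like).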
eb9bae2803876f93957e6e2bdd00e57c83cff567
|
pikos/monitors/focused_monitor_attach.py
|
pikos/monitors/focused_monitor_attach.py
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/monitor_attach.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from pikos.monitors.monitor_attach import MonitorAttach
class FocusedMonitorAttach(MonitorAttach):
""" The monitor attach decorator for focused monitors.
This class provides the option for the decorated function to be added in
the set of functions where the monitor will focus.
"""
def __init__(self, obj, include_decorated=False):
""" Class initialization.
Parameters
----------
obj : object
            A context manager to monitor, inspect or profile the decorated
function while it is executed.
include_decorated : boolean
If the decorated function should be included into the list of
focused functions. Default is False.
"""
super(FocusedMonitorAttach, self).__init__(obj)
self._include_decorated = include_decorated
def _wrap_function(self, function):
""" Wrap a normal callable object.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_function(function)
def _wrap_generator(self, function):
""" Wrap a generator function.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_generator(function)
def _add_function(self, function):
functions = self._monitor_object.functions
functions.add(function)
|
Add a new MonitorAttach class for Focused monitors
|
Add a new MonitorAttach class for Focused monitors
|
Python
|
bsd-3-clause
|
enthought/pikos,enthought/pikos,enthought/pikos
|
Add a new MonitorAttach class for Focused monitors
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/monitor_attach.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from pikos.monitors.monitor_attach import MonitorAttach
class FocusedMonitorAttach(MonitorAttach):
""" The monitor attach decorator for focused monitors.
This class provides the option for the decorated function to be added in
the set of functions where the monitor will focus.
"""
def __init__(self, obj, include_decorated=False):
""" Class initialization.
Parameters
----------
obj : object
            A context manager to monitor, inspect or profile the decorated
function while it is executed.
include_decorated : boolean
If the decorated function should be included into the list of
focused functions. Default is False.
"""
super(FocusedMonitorAttach, self).__init__(obj)
self._include_decorated = include_decorated
def _wrap_function(self, function):
""" Wrap a normal callable object.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_function(function)
def _wrap_generator(self, function):
""" Wrap a generator function.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_generator(function)
def _add_function(self, function):
functions = self._monitor_object.functions
functions.add(function)
|
<commit_before><commit_msg>Add a new MonitorAttach class for Focused monitors<commit_after>
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/monitor_attach.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from pikos.monitors.monitor_attach import MonitorAttach
class FocusedMonitorAttach(MonitorAttach):
""" The monitor attach decorator for focused monitors.
This class provides the option for the decorated function to be added in
the set of functions where the monitor will focus.
"""
def __init__(self, obj, include_decorated=False):
""" Class initialization.
Parameters
----------
obj : object
            A context manager to monitor, inspect or profile the decorated
function while it is executed.
include_decorated : boolean
If the decorated function should be included into the list of
focused functions. Default is False.
"""
super(FocusedMonitorAttach, self).__init__(obj)
self._include_decorated = include_decorated
def _wrap_function(self, function):
""" Wrap a normal callable object.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_function(function)
def _wrap_generator(self, function):
""" Wrap a generator function.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_generator(function)
def _add_function(self, function):
functions = self._monitor_object.functions
functions.add(function)
|
Add a new MonitorAttach class for Focused monitors
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/monitor_attach.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from pikos.monitors.monitor_attach import MonitorAttach
class FocusedMonitorAttach(MonitorAttach):
""" The monitor attach decorator for focused monitors.
This class provides the option for the decorated function to be added in
the set of functions where the monitor will focus.
"""
def __init__(self, obj, include_decorated=False):
""" Class initialization.
Parameters
----------
obj : object
            A context manager to monitor, inspect or profile the decorated
function while it is executed.
include_decorated : boolean
If the decorated function should be included into the list of
focused functions. Default is False.
"""
super(FocusedMonitorAttach, self).__init__(obj)
self._include_decorated = include_decorated
def _wrap_function(self, function):
""" Wrap a normal callable object.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_function(function)
def _wrap_generator(self, function):
""" Wrap a generator function.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_generator(function)
def _add_function(self, function):
functions = self._monitor_object.functions
functions.add(function)
|
<commit_before><commit_msg>Add a new MonitorAttach class for Focused monitors<commit_after># -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/monitor_attach.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from pikos.monitors.monitor_attach import MonitorAttach
class FocusedMonitorAttach(MonitorAttach):
""" The monitor attach decorator for focused monitors.
This class provides the option for the decorated function to be added in
the set of functions where the monitor will focus.
"""
def __init__(self, obj, include_decorated=False):
""" Class initialization.
Parameters
----------
obj : object
            A context manager to monitor, inspect or profile the decorated
function while it is executed.
include_decorated : boolean
If the decorated function should be included into the list of
focused functions. Default is False.
"""
super(FocusedMonitorAttach, self).__init__(obj)
self._include_decorated = include_decorated
def _wrap_function(self, function):
""" Wrap a normal callable object.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_function(function)
def _wrap_generator(self, function):
""" Wrap a generator function.
"""
if self._include_decorated:
self._add_function(function)
return super(FocusedMonitorAttach, self)._wrap_generator(function)
def _add_function(self, function):
functions = self._monitor_object.functions
functions.add(function)
|
|
f4e49e2ee6b8ae7b1ab5132b3f900b0002acda54
|
docs/source/powerline_autodoc.py
|
docs/source/powerline_autodoc.py
|
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.lib.threaded import ThreadedSegment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, ThreadedSegment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
|
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.segments import Segment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, Segment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
|
Make powerline autodoc add all Segments
|
Make powerline autodoc add all Segments
|
Python
|
mit
|
lukw00/powerline,xfumihiro/powerline,dragon788/powerline,prvnkumar/powerline,magus424/powerline,S0lll0s/powerline,QuLogic/powerline,EricSB/powerline,xxxhycl2010/powerline,lukw00/powerline,firebitsbr/powerline,russellb/powerline,bartvm/powerline,dragon788/powerline,prvnkumar/powerline,S0lll0s/powerline,bartvm/powerline,russellb/powerline,IvanAli/powerline,S0lll0s/powerline,xxxhycl2010/powerline,bezhermoso/powerline,EricSB/powerline,s0undt3ch/powerline,kenrachynski/powerline,wfscheper/powerline,seanfisk/powerline,Liangjianghao/powerline,s0undt3ch/powerline,junix/powerline,Liangjianghao/powerline,dragon788/powerline,lukw00/powerline,darac/powerline,wfscheper/powerline,kenrachynski/powerline,bartvm/powerline,bezhermoso/powerline,russellb/powerline,xxxhycl2010/powerline,junix/powerline,darac/powerline,wfscheper/powerline,areteix/powerline,cyrixhero/powerline,darac/powerline,xfumihiro/powerline,s0undt3ch/powerline,seanfisk/powerline,blindFS/powerline,EricSB/powerline,blindFS/powerline,firebitsbr/powerline,DoctorJellyface/powerline,areteix/powerline,kenrachynski/powerline,prvnkumar/powerline,DoctorJellyface/powerline,xfumihiro/powerline,Luffin/powerline,QuLogic/powerline,cyrixhero/powerline,IvanAli/powerline,magus424/powerline,DoctorJellyface/powerline,Luffin/powerline,QuLogic/powerline,blindFS/powerline,areteix/powerline,magus424/powerline,firebitsbr/powerline,IvanAli/powerline,cyrixhero/powerline,Liangjianghao/powerline,junix/powerline,seanfisk/powerline,Luffin/powerline,bezhermoso/powerline
|
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.lib.threaded import ThreadedSegment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, ThreadedSegment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
Make powerline autodoc add all Segments
|
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.segments import Segment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, Segment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
|
<commit_before># vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.lib.threaded import ThreadedSegment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, ThreadedSegment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
<commit_msg>Make powerline autodoc add all Segments<commit_after>
|
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.segments import Segment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, Segment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
|
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.lib.threaded import ThreadedSegment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, ThreadedSegment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
Make powerline autodoc add all Segments
# vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.segments import Segment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, Segment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
|
<commit_before># vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.lib.threaded import ThreadedSegment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, ThreadedSegment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
<commit_msg>Make powerline autodoc add all Segments<commit_after># vim:fileencoding=utf-8:noet
from sphinx.ext import autodoc
from inspect import formatargspec
from powerline.lint.inspect import getconfigargspec
from powerline.segments import Segment
try:
from __builtin__ import unicode
except ImportError:
unicode = lambda s, enc: s # NOQA
def formatvalue(val):
if type(val) is str:
return '="' + unicode(val, 'utf-8').replace('"', '\\"').replace('\\', '\\\\') + '"'
else:
return '=' + repr(val)
class ThreadedDocumenter(autodoc.FunctionDocumenter):
'''Specialized documenter subclass for ThreadedSegment subclasses.'''
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return (isinstance(member, Segment) or
super(ThreadedDocumenter, cls).can_document_member(member, membername, isattr, parent))
def format_args(self):
argspec = getconfigargspec(self.object)
return formatargspec(*argspec, formatvalue=formatvalue).replace('\\', '\\\\')
def setup(app):
autodoc.setup(app)
app.add_autodocumenter(ThreadedDocumenter)
|
7d7a3a6a71dccec3c44fe8f1dd6576423d2ca278
|
formalization/coq2latex.py
|
formalization/coq2latex.py
|
#!/usr/bin/python
import sys
import re
from string import Template
def rule2latex(name, premises, conclusion):
return Template(
r"""\newcommand{\rl$name}{\referTo{$name}{rul:$name}}
\newcommand{\show$name}{%
\infer[\rulename{$name}]
{$premises}
{$conclusion}
}""").substitute({"name" : name,
"premises" : r" \\ ".join(premises),
"conclusion" : conclusion})
### MAIN PROGRAM
# load the source file
with open(sys.argv[1], "r") as f:
src = f.read()
for rule in re.findall(
r'^\s*\|\s+' # the beginning of a rule
r'(?P<rulename>\w+)\s*:\s*$' # rule name
r'\s*rule\s*' # header
r'(?P<rulebody>.*?)' # rule body
r'endrule', # footer
src,
re.DOTALL + re.MULTILINE):
rulename = rule[0]
rulebody = rule[1]
m = re.match(
r'^(\s*parameters:.*?,)?' # optional parameters
r'(?P<premises>.*?)' # premises
r'conclusion:\s*(?P<conclusion>.*)\s*$', # the rest is the rule
rulebody,
re.DOTALL)
if not m:
print ("Failed to parse rule {0} whose body is:\n{1}".format(rulename, rulebody))
assert False
premises = re.split(r'\s*premise:\s*', m.group('premises'))
if len(premises) > 1 and not premises[0]: premises = premises[1:]
conclusion = m.group('conclusion')
print ("\n===={0}====\n".format(rulename))
print (rule2latex(rulename, premises, conclusion))
|
Prepare for coq -> latex translation
|
Prepare for coq -> latex translation
|
Python
|
mit
|
TheoWinterhalter/formal-type-theory
|
Prepare for coq -> latex translation
|
#!/usr/bin/python
import sys
import re
from string import Template
def rule2latex(name, premises, conclusion):
return Template(
r"""\newcommand{\rl$name}{\referTo{$name}{rul:$name}}
\newcommand{\show$name}{%
\infer[\rulename{$name}]
{$premises}
{$conclusion}
}""").substitute({"name" : name,
"premises" : r" \\ ".join(premises),
"conclusion" : conclusion})
### MAIN PROGRAM
# load the source file
with open(sys.argv[1], "r") as f:
src = f.read()
for rule in re.findall(
r'^\s*\|\s+' # the beginning of a rule
r'(?P<rulename>\w+)\s*:\s*$' # rule name
r'\s*rule\s*' # header
r'(?P<rulebody>.*?)' # rule body
r'endrule', # footer
src,
re.DOTALL + re.MULTILINE):
rulename = rule[0]
rulebody = rule[1]
m = re.match(
r'^(\s*parameters:.*?,)?' # optional parameters
r'(?P<premises>.*?)' # premises
r'conclusion:\s*(?P<conclusion>.*)\s*$', # the rest is the rule
rulebody,
re.DOTALL)
if not m:
print ("Failed to parse rule {0} whose body is:\n{1}".format(rulename, rulebody))
assert False
premises = re.split(r'\s*premise:\s*', m.group('premises'))
if len(premises) > 1 and not premises[0]: premises = premises[1:]
conclusion = m.group('conclusion')
print ("\n===={0}====\n".format(rulename))
print (rule2latex(rulename, premises, conclusion))
|
<commit_before><commit_msg>Prepare for coq -> latex translation<commit_after>
|
#!/usr/bin/python
import sys
import re
from string import Template
def rule2latex(name, premises, conclusion):
return Template(
r"""\newcommand{\rl$name}{\referTo{$name}{rul:$name}}
\newcommand{\show$name}{%
\infer[\rulename{$name}]
{$premises}
{$conclusion}
}""").substitute({"name" : name,
"premises" : r" \\ ".join(premises),
"conclusion" : conclusion})
### MAIN PROGRAM
# load the source file
with open(sys.argv[1], "r") as f:
src = f.read()
for rule in re.findall(
r'^\s*\|\s+' # the beginning of a rule
r'(?P<rulename>\w+)\s*:\s*$' # rule name
r'\s*rule\s*' # header
r'(?P<rulebody>.*?)' # rule body
r'endrule', # footer
src,
re.DOTALL + re.MULTILINE):
rulename = rule[0]
rulebody = rule[1]
m = re.match(
r'^(\s*parameters:.*?,)?' # optional parameters
r'(?P<premises>.*?)' # premises
r'conclusion:\s*(?P<conclusion>.*)\s*$', # the rest is the rule
rulebody,
re.DOTALL)
if not m:
print ("Failed to parse rule {0} whose body is:\n{1}".format(rulename, rulebody))
assert False
premises = re.split(r'\s*premise:\s*', m.group('premises'))
if len(premises) > 1 and not premises[0]: premises = premises[1:]
conclusion = m.group('conclusion')
print ("\n===={0}====\n".format(rulename))
print (rule2latex(rulename, premises, conclusion))
|
Prepare for coq -> latex translation
#!/usr/bin/python
import sys
import re
from string import Template
def rule2latex(name, premises, conclusion):
return Template(
r"""\newcommand{\rl$name}{\referTo{$name}{rul:$name}}
\newcommand{\show$name}{%
\infer[\rulename{$name}]
{$premises}
{$conclusion}
}""").substitute({"name" : name,
"premises" : r" \\ ".join(premises),
"conclusion" : conclusion})
### MAIN PROGRAM
# load the source file
with open(sys.argv[1], "r") as f:
src = f.read()
for rule in re.findall(
r'^\s*\|\s+' # the beginning of a rule
r'(?P<rulename>\w+)\s*:\s*$' # rule name
r'\s*rule\s*' # header
r'(?P<rulebody>.*?)' # rule body
r'endrule', # footer
src,
re.DOTALL + re.MULTILINE):
rulename = rule[0]
rulebody = rule[1]
m = re.match(
r'^(\s*parameters:.*?,)?' # optional parameters
r'(?P<premises>.*?)' # premises
r'conclusion:\s*(?P<conclusion>.*)\s*$', # the rest is the rule
rulebody,
re.DOTALL)
if not m:
print ("Failed to parse rule {0} whose body is:\n{1}".format(rulename, rulebody))
assert False
premises = re.split(r'\s*premise:\s*', m.group('premises'))
if len(premises) > 1 and not premises[0]: premises = premises[1:]
conclusion = m.group('conclusion')
print ("\n===={0}====\n".format(rulename))
print (rule2latex(rulename, premises, conclusion))
|
<commit_before><commit_msg>Prepare for coq -> latex translation<commit_after>#!/usr/bin/python
import sys
import re
from string import Template
def rule2latex(name, premises, conclusion):
return Template(
r"""\newcommand{\rl$name}{\referTo{$name}{rul:$name}}
\newcommand{\show$name}{%
\infer[\rulename{$name}]
{$premises}
{$conclusion}
}""").substitute({"name" : name,
"premises" : r" \\ ".join(premises),
"conclusion" : conclusion})
### MAIN PROGRAM
# load the source file
with open(sys.argv[1], "r") as f:
src = f.read()
for rule in re.findall(
r'^\s*\|\s+' # the beginning of a rule
r'(?P<rulename>\w+)\s*:\s*$' # rule name
r'\s*rule\s*' # header
r'(?P<rulebody>.*?)' # rule body
r'endrule', # footer
src,
re.DOTALL + re.MULTILINE):
rulename = rule[0]
rulebody = rule[1]
m = re.match(
r'^(\s*parameters:.*?,)?' # optional parameters
r'(?P<premises>.*?)' # premises
r'conclusion:\s*(?P<conclusion>.*)\s*$', # the rest is the rule
rulebody,
re.DOTALL)
if not m:
print ("Failed to parse rule {0} whose body is:\n{1}".format(rulename, rulebody))
assert False
premises = re.split(r'\s*premise:\s*', m.group('premises'))
if len(premises) > 1 and not premises[0]: premises = premises[1:]
conclusion = m.group('conclusion')
print ("\n===={0}====\n".format(rulename))
print (rule2latex(rulename, premises, conclusion))
|
|
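The scanner is driven by the two regular expressions alone; a self-contained check of the outer one on a made-up Coq-like rule (the input below is invented for illustration, not taken from the formalization):

import re

src = """
| TermAbs :
    rule
      parameters: {G A B u},
      premise: istype G A
      conclusion: isterm G (lam A B u) (Prod A B)
    endrule
"""
rules = re.findall(
    r'^\s*\|\s+'                  # the beginning of a rule
    r'(?P<rulename>\w+)\s*:\s*$'  # rule name
    r'\s*rule\s*'                 # header
    r'(?P<rulebody>.*?)'          # rule body
    r'endrule',                   # footer
    src,
    re.DOTALL + re.MULTILINE)
print(rules[0][0])  # TermAbs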
9b9582a1b7226ceb9cc65657ffb7fd7d51c8ea2a
|
lib/exp/featx/__init__.py
|
lib/exp/featx/__init__.py
|
__all__ = []
from lib.exp.featx.base import Featx
from lib.exp.tools.slider import Slider
class SlideFeatx(Featx, Slider):
def __init__(self, root, name):
Featx.__init__(self, root, name)
Slider.__init__(self, root, name)
def get_feats(self):
imgl = self.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
|
__all__ = []
from lib.exp.featx.base import Feats
from lib.exp.tools.slider import Slider
from lib.exp.tools.video import Video
from lib.exp.prepare import Prepare
class Featx(Feats):
def __init__(self, root, name):
Feats.__init__(self, root, name)
def get_slide_feats(self):
ss = Slider(self.root, self.name)
imgl = ss.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
def get_frame_feats(self):
pp = Prepare(self.root, self.name)
vv = Video(self.root, self.name)
imgl = vv.get_frames(pp.frame_ids(), gray=True)
self.feats(imgl, prefix="f")
|
Change to use `featx` in package
|
Change to use `featx` in package
|
Python
|
agpl-3.0
|
speed-of-light/pyslider
|
__all__ = []
from lib.exp.featx.base import Featx
from lib.exp.tools.slider import Slider
class SlideFeatx(Featx, Slider):
def __init__(self, root, name):
Featx.__init__(self, root, name)
Slider.__init__(self, root, name)
def get_feats(self):
imgl = self.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
Change to use `featx` in package
|
__all__ = []
from lib.exp.featx.base import Feats
from lib.exp.tools.slider import Slider
from lib.exp.tools.video import Video
from lib.exp.prepare import Prepare
class Featx(Feats):
def __init__(self, root, name):
Feats.__init__(self, root, name)
def get_slide_feats(self):
ss = Slider(self.root, self.name)
imgl = ss.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
def get_frame_feats(self):
pp = Prepare(self.root, self.name)
vv = Video(self.root, self.name)
imgl = vv.get_frames(pp.frame_ids(), gray=True)
self.feats(imgl, prefix="f")
|
<commit_before>__all__ = []
from lib.exp.featx.base import Featx
from lib.exp.tools.slider import Slider
class SlideFeatx(Featx, Slider):
def __init__(self, root, name):
Featx.__init__(self, root, name)
Slider.__init__(self, root, name)
def get_feats(self):
imgl = self.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
<commit_msg>Change to use `featx` in package<commit_after>
|
__all__ = []
from lib.exp.featx.base import Feats
from lib.exp.tools.slider import Slider
from lib.exp.tools.video import Video
from lib.exp.prepare import Prepare
class Featx(Feats):
def __init__(self, root, name):
Feats.__init__(self, root, name)
def get_slide_feats(self):
ss = Slider(self.root, self.name)
imgl = ss.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
def get_frame_feats(self):
pp = Prepare(self.root, self.name)
vv = Video(self.root, self.name)
imgl = vv.get_frames(pp.frame_ids(), gray=True)
self.feats(imgl, prefix="f")
|
__all__ = []
from lib.exp.featx.base import Featx
from lib.exp.tools.slider import Slider
class SlideFeatx(Featx, Slider):
def __init__(self, root, name):
Featx.__init__(self, root, name)
Slider.__init__(self, root, name)
def get_feats(self):
imgl = self.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
Change to use `featx` in package
__all__ = []
from lib.exp.featx.base import Feats
from lib.exp.tools.slider import Slider
from lib.exp.tools.video import Video
from lib.exp.prepare import Prepare
class Featx(Feats):
def __init__(self, root, name):
Feats.__init__(self, root, name)
def get_slide_feats(self):
ss = Slider(self.root, self.name)
imgl = ss.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
def get_frame_feats(self):
pp = Prepare(self.root, self.name)
vv = Video(self.root, self.name)
imgl = vv.get_frames(pp.frame_ids(), gray=True)
self.feats(imgl, prefix="f")
|
<commit_before>__all__ = []
from lib.exp.featx.base import Featx
from lib.exp.tools.slider import Slider
class SlideFeatx(Featx, Slider):
def __init__(self, root, name):
Featx.__init__(self, root, name)
Slider.__init__(self, root, name)
def get_feats(self):
imgl = self.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
<commit_msg>Change to use `featx` in package<commit_after>__all__ = []
from lib.exp.featx.base import Feats
from lib.exp.tools.slider import Slider
from lib.exp.tools.video import Video
from lib.exp.prepare import Prepare
class Featx(Feats):
def __init__(self, root, name):
Feats.__init__(self, root, name)
def get_slide_feats(self):
ss = Slider(self.root, self.name)
imgl = ss.get_slides(None, gray=True, resize=True)
self.feats(imgl, prefix="s")
def get_frame_feats(self):
pp = Prepare(self.root, self.name)
vv = Video(self.root, self.name)
imgl = vv.get_frames(pp.frame_ids(), gray=True)
self.feats(imgl, prefix="f")
|
8db49a2336e733479d8f1dd573b20807763c7681
|
gem/migrations/0021_commentcountrule.py
|
gem/migrations/0021_commentcountrule.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-19 11:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0012_remove_personalisablepagemetadata_is_segmented'),
('gem', '0020_profiledatarule'),
]
operations = [
migrations.CreateModel(
name='CommentCountRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('operator', models.CharField(choices=[(b'eq', 'equals'), (b'gt', 'greater than'), (b'lt', 'less than')], default=b'eq', max_length=3, verbose_name='operator')),
('count', models.IntegerField()),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_commentcountrule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Comment count rule',
},
),
]
|
Add migration for the segment
|
Add migration for the segment
|
Python
|
bsd-2-clause
|
praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem
|
Add migration for the segment
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-19 11:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0012_remove_personalisablepagemetadata_is_segmented'),
('gem', '0020_profiledatarule'),
]
operations = [
migrations.CreateModel(
name='CommentCountRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('operator', models.CharField(choices=[(b'eq', 'equals'), (b'gt', 'greater than'), (b'lt', 'less than')], default=b'eq', max_length=3, verbose_name='operator')),
('count', models.IntegerField()),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_commentcountrule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Comment count rule',
},
),
]
|
<commit_before><commit_msg>Add migration for the segment<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-19 11:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0012_remove_personalisablepagemetadata_is_segmented'),
('gem', '0020_profiledatarule'),
]
operations = [
migrations.CreateModel(
name='CommentCountRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('operator', models.CharField(choices=[(b'eq', 'equals'), (b'gt', 'greater than'), (b'lt', 'less than')], default=b'eq', max_length=3, verbose_name='operator')),
('count', models.IntegerField()),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_commentcountrule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Comment count rule',
},
),
]
|
Add migration for the segment
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-19 11:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0012_remove_personalisablepagemetadata_is_segmented'),
('gem', '0020_profiledatarule'),
]
operations = [
migrations.CreateModel(
name='CommentCountRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('operator', models.CharField(choices=[(b'eq', 'equals'), (b'gt', 'greater than'), (b'lt', 'less than')], default=b'eq', max_length=3, verbose_name='operator')),
('count', models.IntegerField()),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_commentcountrule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Comment count rule',
},
),
]
|
<commit_before><commit_msg>Add migration for the segment<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-10-19 11:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0012_remove_personalisablepagemetadata_is_segmented'),
('gem', '0020_profiledatarule'),
]
operations = [
migrations.CreateModel(
name='CommentCountRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('operator', models.CharField(choices=[(b'eq', 'equals'), (b'gt', 'greater than'), (b'lt', 'less than')], default=b'eq', max_length=3, verbose_name='operator')),
('count', models.IntegerField()),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_commentcountrule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Comment count rule',
},
),
]
|
|
41680da53d5059e3b1e2ce497916f26d4c06cf16
|
lowfat/management/commands/fixcw19.py
|
lowfat/management/commands/fixcw19.py
|
import datetime
from django.core.management.base import BaseCommand
from lowfat.models import Claimant, Fund, Expense
class Command(BaseCommand):
help = "CW19 funding request"
def handle(self, *args, **options):
for claimant in Claimant.objects.filter(
application_year=2019
):
candidate = Claimant.objects.get(
email=claimant.email,
application_year=2018
)
for fund in Fund.objects.filter(
claimant=claimant
):
print("Transfering {} from {} to {} ...".format(
fund,
claimant,
candidate
)
)
fund.claimant = candidate
fund.save()
print("Transfer complete.")
print("Removing {} ...".format(claimant))
claimant.delete()
print("Remove successful.")
|
Add fix related with CW19
|
Add fix related with CW19
|
Python
|
bsd-3-clause
|
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
|
Add fix related with CW19
|
import datetime
from django.core.management.base import BaseCommand
from lowfat.models import Claimant, Fund, Expense
class Command(BaseCommand):
help = "CW19 funding request"
def handle(self, *args, **options):
for claimant in Claimant.objects.filter(
application_year=2019
):
candidate = Claimant.objects.get(
email=claimant.email,
application_year=2018
)
for fund in Fund.objects.filter(
claimant=claimant
):
print("Transfering {} from {} to {} ...".format(
fund,
claimant,
candidate
)
)
fund.claimant = candidate
fund.save()
print("Transfer complete.")
print("Removing {} ...".format(claimant))
claimant.delete()
print("Remove successful.")
|
<commit_before><commit_msg>Add fix related with CW19<commit_after>
|
import datetime
from django.core.management.base import BaseCommand
from lowfat.models import Claimant, Fund, Expense
class Command(BaseCommand):
help = "CW19 funding request"
def handle(self, *args, **options):
for claimant in Claimant.objects.filter(
application_year=2019
):
candidate = Claimant.objects.get(
email=claimant.email,
application_year=2018
)
for fund in Fund.objects.filter(
claimant=claimant
):
print("Transfering {} from {} to {} ...".format(
fund,
claimant,
candidate
)
)
fund.claimant = candidate
fund.save()
print("Transfer complete.")
print("Removing {} ...".format(claimant))
claimant.delete()
print("Remove successful.")
|
Add fix related with CW19
import datetime
from django.core.management.base import BaseCommand
from lowfat.models import Claimant, Fund, Expense
class Command(BaseCommand):
help = "CW19 funding request"
def handle(self, *args, **options):
for claimant in Claimant.objects.filter(
application_year=2019
):
candidate = Claimant.objects.get(
email=claimant.email,
application_year=2018
)
for fund in Fund.objects.filter(
claimant=claimant
):
print("Transfering {} from {} to {} ...".format(
fund,
claimant,
candidate
)
)
fund.claimant = candidate
fund.save()
print("Transfer complete.")
print("Removing {} ...".format(claimant))
claimant.delete()
print("Remove successful.")
|
<commit_before><commit_msg>Add fix related to CW19<commit_after>import datetime
from django.core.management.base import BaseCommand
from lowfat.models import Claimant, Fund, Expense
class Command(BaseCommand):
help = "CW19 funding request"
def handle(self, *args, **options):
for claimant in Claimant.objects.filter(
application_year=2019
):
candidate = Claimant.objects.get(
email=claimant.email,
application_year=2018
)
for fund in Fund.objects.filter(
claimant=claimant
):
print("Transfering {} from {} to {} ...".format(
fund,
claimant,
candidate
)
)
fund.claimant = candidate
fund.save()
print("Transfer complete.")
print("Removing {} ...".format(claimant))
claimant.delete()
print("Remove successful.")
|
|
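The command above deletes each claimant only after moving their funds; a cautious variant wraps both steps in a transaction so a failure rolls the whole move back. A minimal sketch reusing the models imported above (transaction.atomic is standard Django):

from django.db import transaction

def transfer_funds(claimant, candidate):
    # Either every fund moves and the claimant is removed, or nothing changes.
    with transaction.atomic():
        for fund in Fund.objects.filter(claimant=claimant):
            fund.claimant = candidate
            fund.save()
        claimant.delete()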
c2109312996fed550ddbfaf3f39c79b709757a8c
|
alembic/versions/66ecf0b2aed5_add_pages_table.py
|
alembic/versions/66ecf0b2aed5_add_pages_table.py
|
"""add pages table
Revision ID: 66ecf0b2aed5
Revises: c7476118715f
Create Date: 2019-06-01 16:10:06.519049
"""
# revision identifiers, used by Alembic.
revision = '66ecf0b2aed5'
down_revision = 'c7476118715f'
import datetime
from alembic import op
import sqlalchemy as sa
def make_timestamp():
now = datetime.datetime.utcnow()
return now.isoformat()
def upgrade():
op.create_table(
'pages',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('title', sa.Unicode(length=255), nullable=False),
sa.Column('body', sa.UnicodeText, nullable=False),
sa.Column('app_id', sa.Integer, sa.ForeignKey('app.id', ondelete='CASCADE'), nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('created', sa.Text, default=make_timestamp),
)
def downgrade():
op.drop_table('pages')
|
Create the table via migration.
|
Create the table via migration.
|
Python
|
agpl-3.0
|
Scifabric/pybossa,Scifabric/pybossa
|
Create the table via migration.
|
"""add pages table
Revision ID: 66ecf0b2aed5
Revises: c7476118715f
Create Date: 2019-06-01 16:10:06.519049
"""
# revision identifiers, used by Alembic.
revision = '66ecf0b2aed5'
down_revision = 'c7476118715f'
import datetime
from alembic import op
import sqlalchemy as sa
def make_timestamp():
now = datetime.datetime.utcnow()
return now.isoformat()
def upgrade():
op.create_table(
'pages',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('title', sa.Unicode(length=255), nullable=False),
sa.Column('body', sa.UnicodeText, nullable=False),
sa.Column('app_id', sa.Integer, sa.ForeignKey('app.id', ondelete='CASCADE'), nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('created', sa.Text, default=make_timestamp),
)
def downgrade():
op.drop_table('pages')
|
<commit_before><commit_msg>Create the table via migration.<commit_after>
|
"""add pages table
Revision ID: 66ecf0b2aed5
Revises: c7476118715f
Create Date: 2019-06-01 16:10:06.519049
"""
# revision identifiers, used by Alembic.
revision = '66ecf0b2aed5'
down_revision = 'c7476118715f'
import datetime
from alembic import op
import sqlalchemy as sa
def make_timestamp():
now = datetime.datetime.utcnow()
return now.isoformat()
def upgrade():
op.create_table(
'pages',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('title', sa.Unicode(length=255), nullable=False),
sa.Column('body', sa.UnicodeText, nullable=False),
sa.Column('app_id', sa.Integer, sa.ForeignKey('app.id', ondelete='CASCADE'), nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('created', sa.Text, default=make_timestamp),
)
def downgrade():
op.drop_table('pages')
|
Create the table via migration."""add pages table
Revision ID: 66ecf0b2aed5
Revises: c7476118715f
Create Date: 2019-06-01 16:10:06.519049
"""
# revision identifiers, used by Alembic.
revision = '66ecf0b2aed5'
down_revision = 'c7476118715f'
import datetime
from alembic import op
import sqlalchemy as sa
def make_timestamp():
now = datetime.datetime.utcnow()
return now.isoformat()
def upgrade():
op.create_table(
'pages',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('title', sa.Unicode(length=255), nullable=False),
sa.Column('body', sa.UnicodeText, nullable=False),
sa.Column('app_id', sa.Integer, sa.ForeignKey('app.id', ondelete='CASCADE'), nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('created', sa.Text, default=make_timestamp),
)
def downgrade():
op.drop_table('pages')
|
<commit_before><commit_msg>Create the table via migration.<commit_after>"""add pages table
Revision ID: 66ecf0b2aed5
Revises: c7476118715f
Create Date: 2019-06-01 16:10:06.519049
"""
# revision identifiers, used by Alembic.
revision = '66ecf0b2aed5'
down_revision = 'c7476118715f'
import datetime
from alembic import op
import sqlalchemy as sa
def make_timestamp():
now = datetime.datetime.utcnow()
return now.isoformat()
def upgrade():
op.create_table(
'pages',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('title', sa.Unicode(length=255), nullable=False),
sa.Column('body', sa.UnicodeText, nullable=False),
sa.Column('app_id', sa.Integer, sa.ForeignKey('app.id', ondelete='CASCADE'), nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
sa.Column('created', sa.Text, default=make_timestamp),
)
def downgrade():
op.drop_table('pages')
|
|
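A declarative model that would line up with the table created above; this is a sketch assuming a plain SQLAlchemy Base, not code taken from the repository:

import datetime

from sqlalchemy import Column, ForeignKey, Integer, Text, Unicode, UnicodeText
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

def make_timestamp():
    return datetime.datetime.utcnow().isoformat()

class Page(Base):
    __tablename__ = 'pages'
    id = Column(Integer, primary_key=True)
    title = Column(Unicode(255), nullable=False)
    body = Column(UnicodeText, nullable=False)
    app_id = Column(Integer, ForeignKey('app.id', ondelete='CASCADE'), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'))
    created = Column(Text, default=make_timestamp)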
b5e6cf14d6f6442e1fb855e0aa19368b2ce6db15
|
polyaxon_schemas/pod_lifecycle.py
|
polyaxon_schemas/pod_lifecycle.py
|
from hestia.unknown import UNKNOWN
class PodLifeCycle(object):
CONTAINER_CREATING = 'ContainerCreating'
PENDING = 'Pending'
RUNNING = 'Running'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
UNKNOWN = UNKNOWN
CHOICES = (
(RUNNING, RUNNING),
(PENDING, PENDING),
(CONTAINER_CREATING, CONTAINER_CREATING),
(SUCCEEDED, SUCCEEDED),
(FAILED, FAILED),
)
DONE_STATUS = [FAILED, SUCCEEDED]
|
Move pod lifecycle to schemas
|
Move pod lifecycle to schemas
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Move pod lifecycle to schemas
|
from hestia.unknown import UNKNOWN
class PodLifeCycle(object):
CONTAINER_CREATING = 'ContainerCreating'
PENDING = 'Pending'
RUNNING = 'Running'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
UNKNOWN = UNKNOWN
CHOICES = (
(RUNNING, RUNNING),
(PENDING, PENDING),
(CONTAINER_CREATING, CONTAINER_CREATING),
(SUCCEEDED, SUCCEEDED),
(FAILED, FAILED),
)
DONE_STATUS = [FAILED, SUCCEEDED]
|
<commit_before><commit_msg>Move pod lifecycle to schemas<commit_after>
|
from hestia.unknown import UNKNOWN
class PodLifeCycle(object):
CONTAINER_CREATING = 'ContainerCreating'
PENDING = 'Pending'
RUNNING = 'Running'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
UNKNOWN = UNKNOWN
CHOICES = (
(RUNNING, RUNNING),
(PENDING, PENDING),
(CONTAINER_CREATING, CONTAINER_CREATING),
(SUCCEEDED, SUCCEEDED),
(FAILED, FAILED),
)
DONE_STATUS = [FAILED, SUCCEEDED]
|
Move pod lifecycle to schemasfrom hestia.unknown import UNKNOWN
class PodLifeCycle(object):
CONTAINER_CREATING = 'ContainerCreating'
PENDING = 'Pending'
RUNNING = 'Running'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
UNKNOWN = UNKNOWN
CHOICES = (
(RUNNING, RUNNING),
(PENDING, PENDING),
(CONTAINER_CREATING, CONTAINER_CREATING),
(SUCCEEDED, SUCCEEDED),
(FAILED, FAILED),
)
DONE_STATUS = [FAILED, SUCCEEDED]
|
<commit_before><commit_msg>Move pod lifecycle to schemas<commit_after>from hestia.unknown import UNKNOWN
class PodLifeCycle(object):
CONTAINER_CREATING = 'ContainerCreating'
PENDING = 'Pending'
RUNNING = 'Running'
SUCCEEDED = 'Succeeded'
FAILED = 'Failed'
UNKNOWN = UNKNOWN
CHOICES = (
(RUNNING, RUNNING),
(PENDING, PENDING),
(CONTAINER_CREATING, CONTAINER_CREATING),
(SUCCEEDED, SUCCEEDED),
(FAILED, FAILED),
)
DONE_STATUS = [FAILED, SUCCEEDED]
|
|
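A quick usage sketch for the class above (the import path matches the file added by this commit):

from polyaxon_schemas.pod_lifecycle import PodLifeCycle

status = PodLifeCycle.RUNNING
if status in PodLifeCycle.DONE_STATUS:
    print('pod finished with status {}'.format(status))
else:
    print('pod still active: {}'.format(status))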
649679d67f611e10a4d1821dcfacf01abf7ac5b2
|
migrations/versions/0157_add_rate_limit_to_service.py
|
migrations/versions/0157_add_rate_limit_to_service.py
|
"""
Revision ID: 0157_add_rate_limit_to_service
Revises: 0156_set_temp_letter_contact
Create Date: 2018-01-08 16:13:25.733336
"""
from alembic import op
import sqlalchemy as sa
revision = '0157_add_rate_limit_to_service'
down_revision = '0156_set_temp_letter_contact'
def upgrade():
op.add_column('services', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
op.add_column('services_history', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
def downgrade():
op.drop_column('services_history', 'rate_limit')
op.drop_column('services', 'rate_limit')
|
Add rate_limit column to service model
|
Add rate_limit column to service model
The API rate limit will be removed from the config and added to services
so that it is possible to change the rate_limit for individual services
in rare cases.
Pivotal story: https://www.pivotaltracker.com/story/show/153992529
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add rate_limit column to service model
The API rate limit will be removed from the config and added to services
so that it is possible to change the rate_limit for individual services
in rare cases.
Pivotal story: https://www.pivotaltracker.com/story/show/153992529
|
"""
Revision ID: 0157_add_rate_limit_to_service
Revises: 0156_set_temp_letter_contact
Create Date: 2018-01-08 16:13:25.733336
"""
from alembic import op
import sqlalchemy as sa
revision = '0157_add_rate_limit_to_service'
down_revision = '0156_set_temp_letter_contact'
def upgrade():
op.add_column('services', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
op.add_column('services_history', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
def downgrade():
op.drop_column('services_history', 'rate_limit')
op.drop_column('services', 'rate_limit')
|
<commit_before><commit_msg>Add rate_limit column to service model
The API rate limit will be removed from the config and added to services
so that it is possible to change the rate_limit for individual services
in rare cases.
Pivotal story: https://www.pivotaltracker.com/story/show/153992529<commit_after>
|
"""
Revision ID: 0157_add_rate_limit_to_service
Revises: 0156_set_temp_letter_contact
Create Date: 2018-01-08 16:13:25.733336
"""
from alembic import op
import sqlalchemy as sa
revision = '0157_add_rate_limit_to_service'
down_revision = '0156_set_temp_letter_contact'
def upgrade():
op.add_column('services', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
op.add_column('services_history', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
def downgrade():
op.drop_column('services_history', 'rate_limit')
op.drop_column('services', 'rate_limit')
|
Add rate_limit column to service model
The API rate limit will be removed from the config and added to services
so that it is possible to change the rate_limit for individual services
in rare cases.
Pivotal story: https://www.pivotaltracker.com/story/show/153992529"""
Revision ID: 0157_add_rate_limit_to_service
Revises: 0156_set_temp_letter_contact
Create Date: 2018-01-08 16:13:25.733336
"""
from alembic import op
import sqlalchemy as sa
revision = '0157_add_rate_limit_to_service'
down_revision = '0156_set_temp_letter_contact'
def upgrade():
op.add_column('services', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
op.add_column('services_history', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
def downgrade():
op.drop_column('services_history', 'rate_limit')
op.drop_column('services', 'rate_limit')
|
<commit_before><commit_msg>Add rate_limit column to service model
The API rate limit will be removed from the config and added to services
so that it is possible to change the rate_limit for individual services
in rare cases.
Pivotal story: https://www.pivotaltracker.com/story/show/153992529<commit_after>"""
Revision ID: 0157_add_rate_limit_to_service
Revises: 0156_set_temp_letter_contact
Create Date: 2018-01-08 16:13:25.733336
"""
from alembic import op
import sqlalchemy as sa
revision = '0157_add_rate_limit_to_service'
down_revision = '0156_set_temp_letter_contact'
def upgrade():
op.add_column('services', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
op.add_column('services_history', sa.Column('rate_limit', sa.Integer(), nullable=False, server_default='3000'))
def downgrade():
op.drop_column('services_history', 'rate_limit')
op.drop_column('services', 'rate_limit')
|
|
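A standalone illustration of what the server_default buys you: existing rows pick up the value without a backfill. The scratch SQLite database below only mimics the migration; table and column names are reused from it:

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
with engine.begin() as conn:
    conn.execute(sa.text('CREATE TABLE services (id INTEGER PRIMARY KEY)'))
    conn.execute(sa.text('INSERT INTO services (id) VALUES (1)'))
    conn.execute(sa.text(
        'ALTER TABLE services ADD COLUMN rate_limit INTEGER NOT NULL DEFAULT 3000'))
    row = conn.execute(sa.text('SELECT rate_limit FROM services')).fetchone()
    print(row[0])  # 3000 -- the pre-existing row got the default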
02ca253332acacc95e94f0fced2d37c64d804c22
|
kqueen_ui/generic_views.py
|
kqueen_ui/generic_views.py
|
from flask import flash, session
from flask.views import View
from kqueen_ui.api import get_kqueen_client, get_service_client
import logging
logger = logging.getLogger(__name__)
class KQueenView(View):
"""
KQueen UI base view with methods to handle backend API calls.
"""
def _get_kqueen_client(self):
token = session.get('user', {}).get('token', None)
if token:
return get_kqueen_client(token=token)
return None
def _get_kqueen_service_client(self):
return get_service_client()
    def _handle_response(self, response, resource=None, action=None):
if response:
if response.status == -1:
user_msg = 'Backend is unavailable at this time, please try again later.'
flash(user_msg, 'danger')
return None
elif response.status == 401:
fmt_action = str(action).lower()
fmt_resource = str(resource).lower()
user_msg = 'You are not authorized to {} {}.'.format(fmt_action, fmt_resource)
flash(user_msg, 'warning')
return None
elif response.status >= 400:
            user_msg = 'Exception occurred while contacting backend, please try again later.'
flash(user_msg, 'danger')
return response.data
def handle(self):
"""
Override this method with view function
"""
raise NotImplementedError
def kqueen_request(self, resource, action, fnargs=(), fnkwargs={}, service=False):
client = self._get_kqueen_service_client() if service else self._get_kqueen_client()
if not client:
return None
try:
manager = getattr(client, resource)
response = getattr(manager, action)(*fnargs, **fnkwargs)
except AttributeError:
msg = 'Unknown API method reference "{}.{}"'.format(resource, action)
self.logger('error', msg)
return None
except TypeError:
msg = 'Invalid API method arguments; args: {}, kwargs: {}'.format(str(fnargs), str(fnkwargs))
self.logger('error', msg)
return None
        return self._handle_response(response, resource, action)
def logger(self, severity, message):
view = self.__class__.__name__
msg = '{} view: {}'.format(view, message)
logger_fn = getattr(logger, severity)
logger_fn(msg)
def dispatch_request(self):
return self.handle()
|
Add base class for KQueen UI views
|
Add base class for KQueen UI views
|
Python
|
mit
|
atengler/kqueen-ui,atengler/kqueen-ui,atengler/kqueen-ui,atengler/kqueen-ui
|
Add base class for KQueen UI views
|
from flask import flash, session
from flask.views import View
from kqueen_ui.api import get_kqueen_client, get_service_client
import logging
logger = logging.getLogger(__name__)
class KQueenView(View):
"""
KQueen UI base view with methods to handle backend API calls.
"""
def _get_kqueen_client(self):
token = session.get('user', {}).get('token', None)
if token:
return get_kqueen_client(token=token)
return None
def _get_kqueen_service_client(self):
return get_service_client()
    def _handle_response(self, response, resource=None, action=None):
if response:
if response.status == -1:
user_msg = 'Backend is unavailable at this time, please try again later.'
flash(user_msg, 'danger')
return None
elif response.status == 401:
fmt_action = str(action).lower()
fmt_resource = str(resource).lower()
user_msg = 'You are not authorized to {} {}.'.format(fmt_action, fmt_resource)
flash(user_msg, 'warning')
return None
elif response.status >= 400:
            user_msg = 'Exception occurred while contacting backend, please try again later.'
flash(user_msg, 'danger')
return response.data
def handle(self):
"""
Override this method with view function
"""
raise NotImplementedError
def kqueen_request(self, resource, action, fnargs=(), fnkwargs={}, service=False):
client = self._get_kqueen_service_client() if service else self._get_kqueen_client()
if not client:
return None
try:
manager = getattr(client, resource)
response = getattr(manager, action)(*fnargs, **fnkwargs)
except AttributeError:
msg = 'Unknown API method reference "{}.{}"'.format(resource, action)
self.logger('error', msg)
return None
except TypeError:
msg = 'Invalid API method arguments; args: {}, kwargs: {}'.format(str(fnargs), str(fnkwargs))
self.logger('error', msg)
return None
        return self._handle_response(response, resource, action)
def logger(self, severity, message):
view = self.__class__.__name__
msg = '{} view: {}'.format(view, message)
logger_fn = getattr(logger, severity)
logger_fn(msg)
def dispatch_request(self):
return self.handle()
|
<commit_before><commit_msg>Add base class for KQueen UI views<commit_after>
|
from flask import flash, session
from flask.views import View
from kqueen_ui.api import get_kqueen_client, get_service_client
import logging
logger = logging.getLogger(__name__)
class KQueenView(View):
"""
KQueen UI base view with methods to handle backend API calls.
"""
def _get_kqueen_client(self):
token = session.get('user', {}).get('token', None)
if token:
return get_kqueen_client(token=token)
return None
def _get_kqueen_service_client(self):
return get_service_client()
    def _handle_response(self, response, resource=None, action=None):
if response:
if response.status == -1:
user_msg = 'Backend is unavailable at this time, please try again later.'
flash(user_msg, 'danger')
return None
elif response.status == 401:
fmt_action = str(action).lower()
fmt_resource = str(resource).lower()
user_msg = 'You are not authorized to {} {}.'.format(fmt_action, fmt_resource)
flash(user_msg, 'warning')
return None
elif response.status >= 400:
            user_msg = 'Exception occurred while contacting backend, please try again later.'
flash(user_msg, 'danger')
return response.data
def handle(self):
"""
Override this method with view function
"""
raise NotImplementedError
def kqueen_request(self, resource, action, fnargs=(), fnkwargs={}, service=False):
client = self._get_kqueen_service_client() if service else self._get_kqueen_client()
if not client:
return None
try:
manager = getattr(client, resource)
response = getattr(manager, action)(*fnargs, **fnkwargs)
except AttributeError:
msg = 'Unknown API method reference "{}.{}"'.format(resource, action)
self.logger('error', msg)
return None
except TypeError:
msg = 'Invalid API method arguments; args: {}, kwargs: {}'.format(str(fnargs), str(fnkwargs))
self.logger('error', msg)
return None
        return self._handle_response(response, resource, action)
def logger(self, severity, message):
view = self.__class__.__name__
msg = '{} view: {}'.format(view, message)
logger_fn = getattr(logger, severity)
logger_fn(msg)
def dispatch_request(self):
return self.handle()
|
Add base class for KQueen UI viewsfrom flask import flash, session
from flask.views import View
from kqueen_ui.api import get_kqueen_client, get_service_client
import logging
logger = logging.getLogger(__name__)
class KQueenView(View):
"""
KQueen UI base view with methods to handle backend API calls.
"""
def _get_kqueen_client(self):
token = session.get('user', {}).get('token', None)
if token:
return get_kqueen_client(token=token)
return None
def _get_kqueen_service_client(self):
return get_service_client()
    def _handle_response(self, response, resource=None, action=None):
if response:
if response.status == -1:
user_msg = 'Backend is unavailable at this time, please try again later.'
flash(user_msg, 'danger')
return None
elif response.status == 401:
fmt_action = str(action).lower()
fmt_resource = str(resource).lower()
user_msg = 'You are not authorized to {} {}.'.format(fmt_action, fmt_resource)
flash(user_msg, 'warning')
return None
elif response.status >= 400:
            user_msg = 'Exception occurred while contacting backend, please try again later.'
flash(user_msg, 'danger')
return response.data
def handle(self):
"""
Override this method with view function
"""
raise NotImplementedError
def kqueen_request(self, resource, action, fnargs=(), fnkwargs={}, service=False):
client = self._get_kqueen_service_client() if service else self._get_kqueen_client()
if not client:
return None
try:
manager = getattr(client, resource)
response = getattr(manager, action)(*fnargs, **fnkwargs)
except AttributeError:
msg = 'Unknown API method reference "{}.{}"'.format(resource, action)
self.logger('error', msg)
return None
except TypeError:
msg = 'Invalid API method arguments; args: {}, kwargs: {}'.format(str(fnargs), str(fnkwargs))
self.logger('error', msg)
return None
        return self._handle_response(response, resource, action)
def logger(self, severity, message):
view = self.__class__.__name__
msg = '{} view: {}'.format(view, message)
logger_fn = getattr(logger, severity)
logger_fn(msg)
def dispatch_request(self):
return self.handle()
|
<commit_before><commit_msg>Add base class for KQueen UI views<commit_after>from flask import flash, session
from flask.views import View
from kqueen_ui.api import get_kqueen_client, get_service_client
import logging
logger = logging.getLogger(__name__)
class KQueenView(View):
"""
KQueen UI base view with methods to handle backend API calls.
"""
def _get_kqueen_client(self):
token = session.get('user', {}).get('token', None)
if token:
return get_kqueen_client(token=token)
return None
def _get_kqueen_service_client(self):
return get_service_client()
    def _handle_response(self, response, resource=None, action=None):
if response:
if response.status == -1:
user_msg = 'Backend is unavailable at this time, please try again later.'
flash(user_msg, 'danger')
return None
elif response.status == 401:
fmt_action = str(action).lower()
fmt_resource = str(resource).lower()
user_msg = 'You are not authorized to {} {}.'.format(fmt_action, fmt_resource)
flash(user_msg, 'warning')
return None
elif response.status >= 400:
            user_msg = 'Exception occurred while contacting backend, please try again later.'
flash(user_msg, 'danger')
return response.data
def handle(self):
"""
Override this method with view function
"""
raise NotImplementedError
def kqueen_request(self, resource, action, fnargs=(), fnkwargs={}, service=False):
client = self._get_kqueen_service_client() if service else self._get_kqueen_client()
if not client:
return None
try:
manager = getattr(client, resource)
response = getattr(manager, action)(*fnargs, **fnkwargs)
except AttributeError:
msg = 'Unknown API method reference "{}.{}"'.format(resource, action)
self.logger('error', msg)
return None
except TypeError:
msg = 'Invalid API method arguments; args: {}, kwargs: {}'.format(str(fnargs), str(fnkwargs))
self.logger('error', msg)
return None
        return self._handle_response(response, resource, action)
def logger(self, severity, message):
view = self.__class__.__name__
msg = '{} view: {}'.format(view, message)
logger_fn = getattr(logger, severity)
logger_fn(msg)
def dispatch_request(self):
return self.handle()
|
|
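A minimal subclass sketch showing the intended use of the base class; the resource and action names below are placeholders, not real kqueen_ui endpoints:

from kqueen_ui.generic_views import KQueenView

class ClusterListView(KQueenView):
    def handle(self):
        # 'cluster' / 'list' are hypothetical resource/action names.
        clusters = self.kqueen_request('cluster', 'list')
        if clusters is None:
            return 'Backend unavailable', 503
        return str(clusters)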
b69462f1c671f84c379beca2eaf8b72386f4e2e4
|
math/sum_of_digits/python/sum_of_digits.py
|
math/sum_of_digits/python/sum_of_digits.py
|
def sum_of_digits(n):
    total = 0
    for digit in str(n):
        total += int(digit)
    return total
print(sum_of_digits(3))  # 3
print(sum_of_digits(3454332))  # 24
|
Implement Sum Of Digits in Python
|
Implement Sum Of Digits in Python
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms
|
Implement Sum Of Digits in Python
|
def sum_of_digits(n):
    total = 0
    for digit in str(n):
        total += int(digit)
    return total
print(sum_of_digits(3))  # 3
print(sum_of_digits(3454332))  # 24
|
<commit_before><commit_msg>Implement Sum Of Digits in Python<commit_after>
|
def sum_of_digits(n):
    total = 0
    for digit in str(n):
        total += int(digit)
    return total
print(sum_of_digits(3))  # 3
print(sum_of_digits(3454332))  # 24
|
Implement Sum Of Digits in Pythondef sum_of_digits(n):
    total = 0
    for digit in str(n):
        total += int(digit)
    return total
print(sum_of_digits(3))  # 3
print(sum_of_digits(3454332))  # 24
|
<commit_before><commit_msg>Implement Sum Of Digits in Python<commit_after>def sum_of_digits(n):
    total = 0
    for digit in str(n):
        total += int(digit)
    return total
print(sum_of_digits(3))  # 3
print(sum_of_digits(3454332))  # 24
|
|
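The same result without string conversion, plus a one-line equivalent; both assume a non-negative integer:

def sum_of_digits_arith(n):
    total = 0
    while n:
        n, digit = divmod(n, 10)  # peel off the last digit
        total += digit
    return total

print(sum_of_digits_arith(3454332))        # 24
print(sum(int(d) for d in str(3454332)))   # 24, one-line equivalent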
4146e6cb20197ced3c5c62e9fc2ff7626920d6c0
|
POST_TEST.py
|
POST_TEST.py
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
Test for ISO NC creator
|
Test for ISO NC creator
|
Python
|
bsd-3-clause
|
silasb/heekscnc,silasb/heekscnc,silasb/heekscnc
|
Test for ISO NC creator
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
<commit_before><commit_msg>Test for ISO NC creator<commit_after>
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
Test for ISO NC creatorfrom posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
<commit_before><commit_msg>Test for ISO NC creator<commit_after>from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
|
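A second test in the same style, exercising only calls already used above; whether feed() takes x/y positionally like rapid() does is an assumption about the posts.nc API, so treat this as a sketch:

from posts.nc import *
import posts.iso

output('POST_TEST2.txt')
program_begin(124, 'Square test')
absolute()
metric()
set_plane(0)
feedrate(300)
rapid(0, 0)
rapid(z=5)
feed(z=-1)
feed(50, 0)    # assumed x/y signature, mirroring rapid()
feed(50, 50)
feed(0, 50)
feed(0, 0)
rapid(z=5)
rapid_home()
program_end()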
50b16f7e3996308ae720a3f186a7a3f8031c07a5
|
split-hathitrust.py
|
split-hathitrust.py
|
import os
# Download up to date filelist
os.system("rsync -azv " +
"data.analytics.hathitrust.org::features/listing/htrc-ef-all-files.txt" +
" .")
# Load the file list
infile = open("htrc-ef-all-files.txt")
i = 0
j = 1
# Write out each set of 10000 files as their own list
curout = open("filelists/list"+str(j)+'.txt', 'w')
print(j)
for line in infile:
i += 1
    if i % 10000 == 0:
        print(j)
        j += 1
        curout.close()  # finish the current chunk before opening the next
        curout = open("filelists/list"+str(j)+'.txt', 'w')
    curout.write(line)
curout.close()
infile.close()
|
Implement method for splitting files for batch processing
|
Implement method for splitting files for batch processing
|
Python
|
mit
|
bacovcin/hathitrust-features-database,bacovcin/hathitrust-features-database,bacovcin/hathitrust-features-database
|
Implement method for splitting files for batch processing
|
import os
# Download up to date filelist
os.system("rsync -azv " +
"data.analytics.hathitrust.org::features/listing/htrc-ef-all-files.txt" +
" .")
# Load the file list
infile = open("htrc-ef-all-files.txt")
i = 0
j = 1
# Write out each set of 10000 files as their own list
curout = open("filelists/list"+str(j)+'.txt', 'w')
print(j)
for line in infile:
i += 1
    if i % 10000 == 0:
        print(j)
        j += 1
        curout.close()  # finish the current chunk before opening the next
        curout = open("filelists/list"+str(j)+'.txt', 'w')
    curout.write(line)
curout.close()
infile.close()
|
<commit_before><commit_msg>Implement method for splitting files for batch processing<commit_after>
|
import os
# Download up to date filelist
os.system("rsync -azv " +
"data.analytics.hathitrust.org::features/listing/htrc-ef-all-files.txt" +
" .")
# Load the file list
infile = open("htrc-ef-all-files.txt")
i = 0
j = 1
# Write out each set of 10000 files as their own list
curout = open("filelists/list"+str(j)+'.txt', 'w')
print(j)
for line in infile:
i += 1
    if i % 10000 == 0:
        print(j)
        j += 1
        curout.close()  # finish the current chunk before opening the next
        curout = open("filelists/list"+str(j)+'.txt', 'w')
    curout.write(line)
curout.close()
infile.close()
|
Implement method for splitting files for batch processingimport os
# Download up to date filelist
os.system("rsync -azv " +
"data.analytics.hathitrust.org::features/listing/htrc-ef-all-files.txt" +
" .")
# Load the file list
infile = open("htrc-ef-all-files.txt")
i = 0
j = 1
# Write out each set of 10000 files as their own list
curout = open("filelists/list"+str(j)+'.txt', 'w')
print(j)
for line in infile:
i += 1
    if i % 10000 == 0:
        print(j)
        j += 1
        curout.close()  # finish the current chunk before opening the next
        curout = open("filelists/list"+str(j)+'.txt', 'w')
    curout.write(line)
curout.close()
infile.close()
|
<commit_before><commit_msg>Implement method for splitting files for batch processing<commit_after>import os
# Download up to date filelist
os.system("rsync -azv " +
"data.analytics.hathitrust.org::features/listing/htrc-ef-all-files.txt" +
" .")
# Load the file list
infile = open("htrc-ef-all-files.txt")
i = 0
j = 1
# Write out each set of 10000 files as their own list
curout = open("filelists/list"+str(j)+'.txt', 'w')
print(j)
for line in infile:
i += 1
    if i % 10000 == 0:
        print(j)
        j += 1
        curout.close()  # finish the current chunk before opening the next
        curout = open("filelists/list"+str(j)+'.txt', 'w')
    curout.write(line)
curout.close()
infile.close()
|
|
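An alternative chunking sketch built on itertools.islice; it drops the manual counter and closes every file via context managers (pure stdlib, file names kept from the script above):

from itertools import islice

def split_into_chunks(listing_path, chunk_size=10000):
    with open(listing_path) as infile:
        j = 1
        while True:
            chunk = list(islice(infile, chunk_size))  # next block of lines
            if not chunk:
                break
            with open('filelists/list%d.txt' % j, 'w') as out:
                out.writelines(chunk)
            j += 1

split_into_chunks('htrc-ef-all-files.txt')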
344b6d4a9675aaf38d3cce97f18be0c365c4110c
|
db/goalie_game.py
|
db/goalie_game.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base
class GoalieGame(Base):
__tablename__ = 'goalie_games'
__autoload__ = True
STANDARD_ATTRS = [
"position", "no", "goals", "assists", "primary_assists",
"secondary_assists", "points", "plus_minus", "penalties", "pim",
"toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts",
"shots_on_goal", "shots_blocked", "shots_missed", "hits",
"giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost",
"on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed"
]
|
Add initial version of goalie game item
|
Add initial version of goalie game item
|
Python
|
mit
|
leaffan/pynhldb
|
Add initial version of goalie game item
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base
class GoalieGame(Base):
__tablename__ = 'goalie_games'
__autoload__ = True
STANDARD_ATTRS = [
"position", "no", "goals", "assists", "primary_assists",
"secondary_assists", "points", "plus_minus", "penalties", "pim",
"toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts",
"shots_on_goal", "shots_blocked", "shots_missed", "hits",
"giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost",
"on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed"
]
|
<commit_before><commit_msg>Add initial version of goalie game item<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base
class GoalieGame(Base):
__tablename__ = 'goalie_games'
__autoload__ = True
STANDARD_ATTRS = [
"position", "no", "goals", "assists", "primary_assists",
"secondary_assists", "points", "plus_minus", "penalties", "pim",
"toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts",
"shots_on_goal", "shots_blocked", "shots_missed", "hits",
"giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost",
"on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed"
]
|
Add initial version of goalie game item#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base
class GoalieGame(Base):
__tablename__ = 'goalie_games'
__autoload__ = True
STANDARD_ATTRS = [
"position", "no", "goals", "assists", "primary_assists",
"secondary_assists", "points", "plus_minus", "penalties", "pim",
"toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts",
"shots_on_goal", "shots_blocked", "shots_missed", "hits",
"giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost",
"on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed"
]
|
<commit_before><commit_msg>Add initial version of goalie game item<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base
class GoalieGame(Base):
__tablename__ = 'goalie_games'
__autoload__ = True
STANDARD_ATTRS = [
"position", "no", "goals", "assists", "primary_assists",
"secondary_assists", "points", "plus_minus", "penalties", "pim",
"toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts",
"shots_on_goal", "shots_blocked", "shots_missed", "hits",
"giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost",
"on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed"
]
|
|
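One way the STANDARD_ATTRS list could be put to work; this serialiser is a sketch, not code from the project:

from db.goalie_game import GoalieGame

def goalie_game_to_dict(goalie_game):
    # Pull every standard statistic off the mapped row (missing ones -> None).
    return {attr: getattr(goalie_game, attr, None)
            for attr in GoalieGame.STANDARD_ATTRS}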
f02ff643e341f92d6e75ef79b42fcd2f15d05e8d
|
exp/influence2/GensimExp.py
|
exp/influence2/GensimExp.py
|
import gensim
import gensim.matutils
import logging
import sys
import numpy
import sklearn.feature_extraction.text as text
import scipy.sparse
from exp.util.PorterTokeniser import PorterTokeniser
from gensim.models.ldamodel import LdaModel
from exp.util.SparseUtils import SparseUtils
from apgl.data.Standardiser import Standardiser
"""
Try to get the right params for Gensim
"""
numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3, linewidth=100)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
documentList = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS. user interface management system",
"System and human system engineering testing of EPS.",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
vectoriser = text.TfidfVectorizer(min_df=2, ngram_range=(1,2), binary=True, sublinear_tf=False, norm="l2", max_df=0.95, stop_words="english", tokenizer=PorterTokeniser())
X = vectoriser.fit_transform(documentList)
print(vectoriser.get_feature_names())
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
id2WordDict = dict(zip(range(len(vectoriser.get_feature_names())), vectoriser.get_feature_names()))
k = 10
logging.getLogger('gensim').setLevel(logging.INFO)
lda = LdaModel(corpus, num_topics=k, id2word=id2WordDict, chunksize=1000, distributed=False)
index = gensim.similarities.docsim.SparseMatrixSimilarity(lda[corpus], num_features=k)
newX = vectoriser.transform(["graph"])
newX = [(i, newX[0, i])for i in newX.nonzero()[1]]
result = lda[newX]
similarities = index[result]
similarities = sorted(enumerate(similarities), key=lambda item: -item[1])
print(similarities)
# Compute Hellinger distance
result = [i[1] for i in result]
newX = scipy.sparse.csc_matrix(result)
distances = SparseUtils.hellingerDistances(index.index, newX)
print(1 - distances)
#Try cosine metric
X = Standardiser().normaliseArray(numpy.array(index.index.todense()).T).T
newX = numpy.array(newX.todense())
similarities = X.dot(newX.T).flatten()
print(similarities)
|
Check we are using LDA correctly
|
Check we are using LDA correctly
|
Python
|
bsd-3-clause
|
charanpald/APGL
|
Check we are using LDA correctly
|
import gensim
import gensim.matutils
import logging
import sys
import numpy
import sklearn.feature_extraction.text as text
import scipy.sparse
from exp.util.PorterTokeniser import PorterTokeniser
from gensim.models.ldamodel import LdaModel
from exp.util.SparseUtils import SparseUtils
from apgl.data.Standardiser import Standardiser
"""
Try to get the right params for Gensim
"""
numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3, linewidth=100)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
documentList = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS. user interface management system",
"System and human system engineering testing of EPS.",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
vectoriser = text.TfidfVectorizer(min_df=2, ngram_range=(1,2), binary=True, sublinear_tf=False, norm="l2", max_df=0.95, stop_words="english", tokenizer=PorterTokeniser())
X = vectoriser.fit_transform(documentList)
print(vectoriser.get_feature_names())
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
id2WordDict = dict(zip(range(len(vectoriser.get_feature_names())), vectoriser.get_feature_names()))
k = 10
logging.getLogger('gensim').setLevel(logging.INFO)
lda = LdaModel(corpus, num_topics=k, id2word=id2WordDict, chunksize=1000, distributed=False)
index = gensim.similarities.docsim.SparseMatrixSimilarity(lda[corpus], num_features=k)
newX = vectoriser.transform(["graph"])
newX = [(i, newX[0, i])for i in newX.nonzero()[1]]
result = lda[newX]
similarities = index[result]
similarities = sorted(enumerate(similarities), key=lambda item: -item[1])
print(similarities)
# Compute Hellinger distance
result = [i[1] for i in result]
newX = scipy.sparse.csc_matrix(result)
distances = SparseUtils.hellingerDistances(index.index, newX)
print(1 - distances)
#Try cosine metric
X = Standardiser().normaliseArray(numpy.array(index.index.todense()).T).T
newX = numpy.array(newX.todense())
similarities = X.dot(newX.T).flatten()
print(similarities)
|
<commit_before><commit_msg>Check we are using LDA correctly <commit_after>
|
import gensim
import gensim.matutils
import logging
import sys
import numpy
import sklearn.feature_extraction.text as text
import scipy.sparse
from exp.util.PorterTokeniser import PorterTokeniser
from gensim.models.ldamodel import LdaModel
from exp.util.SparseUtils import SparseUtils
from apgl.data.Standardiser import Standardiser
"""
Try to get the right params for Gensim
"""
numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3, linewidth=100)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
documentList = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS. user interface management system",
"System and human system engineering testing of EPS.",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
vectoriser = text.TfidfVectorizer(min_df=2, ngram_range=(1,2), binary=True, sublinear_tf=False, norm="l2", max_df=0.95, stop_words="english", tokenizer=PorterTokeniser())
X = vectoriser.fit_transform(documentList)
print(vectoriser.get_feature_names())
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
id2WordDict = dict(zip(range(len(vectoriser.get_feature_names())), vectoriser.get_feature_names()))
k = 10
logging.getLogger('gensim').setLevel(logging.INFO)
lda = LdaModel(corpus, num_topics=k, id2word=id2WordDict, chunksize=1000, distributed=False)
index = gensim.similarities.docsim.SparseMatrixSimilarity(lda[corpus], num_features=k)
newX = vectoriser.transform(["graph"])
newX = [(i, newX[0, i])for i in newX.nonzero()[1]]
result = lda[newX]
similarities = index[result]
similarities = sorted(enumerate(similarities), key=lambda item: -item[1])
print(similarities)
# Compute Hellinger distance
result = [i[1] for i in result]
newX = scipy.sparse.csc_matrix(result)
distances = SparseUtils.hellingerDistances(index.index, newX)
print(1 - distances)
#Try cosine metric
X = Standardiser().normaliseArray(numpy.array(index.index.todense()).T).T
newX = numpy.array(newX.todense())
similarities = X.dot(newX.T).flatten()
print(similarities)
|
Check we are using LDA correctly import gensim
import gensim.matutils
import logging
import sys
import numpy
import sklearn.feature_extraction.text as text
import scipy.sparse
from exp.util.PorterTokeniser import PorterTokeniser
from gensim.models.ldamodel import LdaModel
from exp.util.SparseUtils import SparseUtils
from apgl.data.Standardiser import Standardiser
"""
Try to get the right params for Gensim
"""
numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3, linewidth=100)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
documentList = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS. user interface management system",
"System and human system engineering testing of EPS.",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
vectoriser = text.TfidfVectorizer(min_df=2, ngram_range=(1,2), binary=True, sublinear_tf=False, norm="l2", max_df=0.95, stop_words="english", tokenizer=PorterTokeniser())
X = vectoriser.fit_transform(documentList)
print(vectoriser.get_feature_names())
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
id2WordDict = dict(zip(range(len(vectoriser.get_feature_names())), vectoriser.get_feature_names()))
k = 10
logging.getLogger('gensim').setLevel(logging.INFO)
lda = LdaModel(corpus, num_topics=k, id2word=id2WordDict, chunksize=1000, distributed=False)
index = gensim.similarities.docsim.SparseMatrixSimilarity(lda[corpus], num_features=k)
newX = vectoriser.transform(["graph"])
newX = [(i, newX[0, i])for i in newX.nonzero()[1]]
result = lda[newX]
similarities = index[result]
similarities = sorted(enumerate(similarities), key=lambda item: -item[1])
print(similarities)
# Compute Hellinger distance
result = [i[1] for i in result]
newX = scipy.sparse.csc_matrix(result)
distances = SparseUtils.hellingerDistances(index.index, newX)
print(1 - distances)
#Try cosine metric
X = Standardiser().normaliseArray(numpy.array(index.index.todense()).T).T
newX = numpy.array(newX.todense())
similarities = X.dot(newX.T).flatten()
print(similarities)
|
<commit_before><commit_msg>Check we are using LDA correctly <commit_after>import gensim
import gensim.matutils
import logging
import sys
import numpy
import sklearn.feature_extraction.text as text
import scipy.sparse
from exp.util.PorterTokeniser import PorterTokeniser
from gensim.models.ldamodel import LdaModel
from exp.util.SparseUtils import SparseUtils
from apgl.data.Standardiser import Standardiser
"""
Try to get the right params for Gensim
"""
numpy.random.seed(21)
numpy.set_printoptions(suppress=True, precision=3, linewidth=100)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
documentList = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS. user interface management system",
"System and human system engineering testing of EPS.",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
vectoriser = text.TfidfVectorizer(min_df=2, ngram_range=(1,2), binary=True, sublinear_tf=False, norm="l2", max_df=0.95, stop_words="english", tokenizer=PorterTokeniser())
X = vectoriser.fit_transform(documentList)
print(vectoriser.get_feature_names())
corpus = gensim.matutils.Sparse2Corpus(X, documents_columns=False)
id2WordDict = dict(zip(range(len(vectoriser.get_feature_names())), vectoriser.get_feature_names()))
k = 10
logging.getLogger('gensim').setLevel(logging.INFO)
lda = LdaModel(corpus, num_topics=k, id2word=id2WordDict, chunksize=1000, distributed=False)
index = gensim.similarities.docsim.SparseMatrixSimilarity(lda[corpus], num_features=k)
newX = vectoriser.transform(["graph"])
newX = [(i, newX[0, i])for i in newX.nonzero()[1]]
result = lda[newX]
similarities = index[result]
similarities = sorted(enumerate(similarities), key=lambda item: -item[1])
print(similarities)
# Compute Hellinger distance
result = [i[1] for i in result]
newX = scipy.sparse.csc_matrix(result)
distances = SparseUtils.hellingerDistances(index.index, newX)
print(1 - distances)
#Try cosine metric
X = Standardiser().normaliseArray(numpy.array(index.index.todense()).T).T
newX = numpy.array(newX.todense())
similarities = X.dot(newX.T).flatten()
print(similarities)
|
|
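For reference, the Hellinger distance the script gets from SparseUtils is, for discrete distributions p and q, sqrt(0.5 * sum((sqrt(p_i) - sqrt(q_i)) ** 2)); a dense NumPy sketch:

import numpy

def hellinger(p, q):
    # p and q are 1-D arrays that each sum to 1.
    return numpy.sqrt(0.5 * numpy.sum((numpy.sqrt(p) - numpy.sqrt(q)) ** 2))

print(hellinger(numpy.array([0.5, 0.5]), numpy.array([0.5, 0.5])))  # 0.0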
97be289976d107f6ef72e797a67489319a980773
|
steamweb/steamwebbrowsertk.py
|
steamweb/steamwebbrowsertk.py
|
from .steamwebbrowser import SteamWebBrowserCfg
from sys import version_info
from PIL.ImageTk import PhotoImage
if version_info.major >= 3: # Python3
import tkinter as tk
else: # Python 2
import Tkinter as tk
class SteamWebBrowserTk(SteamWebBrowserCfg):
''' SteamWebBrowserCfg with Tkinter UI for displaying captcha image
'''
@staticmethod
def _handle_captcha(captcha_data, message=''):
tk_root = tk.Tk()
def close(captcha_text):
if captcha_text.get() != '':
tk_root.destroy() # Faster than .quit() and won't be re-used anyways
tk_root.title('')
tk_root.configure(bg='black')
captcha = PhotoImage(data=captcha_data)
tk.Label(tk_root, text=message, bg='black', fg='white').pack()
tk.Label(tk_root, image=captcha, bg='black').pack()
captcha_text = tk.StringVar()
tk.Entry(
tk_root,
textvariable=captcha_text,
bg='black',
fg='white',
insertbackground='white'
).pack()
tk_root.bind('<Return>', lambda s: close(captcha_text))
tk_root.mainloop()
return captcha_text.get()
|
Add Tkinter UI class from jbzdarkid
|
Add Tkinter UI class from jbzdarkid
|
Python
|
agpl-3.0
|
jayme-github/steamweb,jbzdarkid/steamweb
|
Add Tkinter UI class from jbzdarkid
|
from .steamwebbrowser import SteamWebBrowserCfg
from sys import version_info
from PIL.ImageTk import PhotoImage
if version_info.major >= 3: # Python3
import tkinter as tk
else: # Python 2
import Tkinter as tk
class SteamWebBrowserTk(SteamWebBrowserCfg):
''' SteamWebBrowserCfg with Tkinter UI for displaying captcha image
'''
@staticmethod
def _handle_captcha(captcha_data, message=''):
tk_root = tk.Tk()
def close(captcha_text):
if captcha_text.get() != '':
tk_root.destroy() # Faster than .quit() and won't be re-used anyways
tk_root.title('')
tk_root.configure(bg='black')
captcha = PhotoImage(data=captcha_data)
tk.Label(tk_root, text=message, bg='black', fg='white').pack()
tk.Label(tk_root, image=captcha, bg='black').pack()
captcha_text = tk.StringVar()
tk.Entry(
tk_root,
textvariable=captcha_text,
bg='black',
fg='white',
insertbackground='white'
).pack()
tk_root.bind('<Return>', lambda s: close(captcha_text))
tk_root.mainloop()
return captcha_text.get()
|
<commit_before><commit_msg>Add Tkinter UI class from jbzdarkid<commit_after>
|
from .steamwebbrowser import SteamWebBrowserCfg
from sys import version_info
from PIL.ImageTk import PhotoImage
if version_info.major >= 3: # Python3
import tkinter as tk
else: # Python 2
import Tkinter as tk
class SteamWebBrowserTk(SteamWebBrowserCfg):
''' SteamWebBrowserCfg with Tkinter UI for displaying captcha image
'''
@staticmethod
def _handle_captcha(captcha_data, message=''):
tk_root = tk.Tk()
def close(captcha_text):
if captcha_text.get() != '':
tk_root.destroy() # Faster than .quit() and won't be re-used anyways
tk_root.title('')
tk_root.configure(bg='black')
captcha = PhotoImage(data=captcha_data)
tk.Label(tk_root, text=message, bg='black', fg='white').pack()
tk.Label(tk_root, image=captcha, bg='black').pack()
captcha_text = tk.StringVar()
tk.Entry(
tk_root,
textvariable=captcha_text,
bg='black',
fg='white',
insertbackground='white'
).pack()
tk_root.bind('<Return>', lambda s: close(captcha_text))
tk_root.mainloop()
return captcha_text.get()
|
Add Tkinter UI class from jbzdarkidfrom .steamwebbrowser import SteamWebBrowserCfg
from sys import version_info
from PIL.ImageTk import PhotoImage
if version_info.major >= 3: # Python3
import tkinter as tk
else: # Python 2
import Tkinter as tk
class SteamWebBrowserTk(SteamWebBrowserCfg):
''' SteamWebBrowserCfg with Tkinter UI for displaying captcha image
'''
@staticmethod
def _handle_captcha(captcha_data, message=''):
tk_root = tk.Tk()
def close(captcha_text):
if captcha_text.get() != '':
tk_root.destroy() # Faster than .quit() and won't be re-used anyways
tk_root.title('')
tk_root.configure(bg='black')
captcha = PhotoImage(data=captcha_data)
tk.Label(tk_root, text=message, bg='black', fg='white').pack()
tk.Label(tk_root, image=captcha, bg='black').pack()
captcha_text = tk.StringVar()
tk.Entry(
tk_root,
textvariable=captcha_text,
bg='black',
fg='white',
insertbackground='white'
).pack()
tk_root.bind('<Return>', lambda s: close(captcha_text))
tk_root.mainloop()
return captcha_text.get()
|
<commit_before><commit_msg>Add Tkinter UI class from jbzdarkid<commit_after>from .steamwebbrowser import SteamWebBrowserCfg
from sys import version_info
from PIL.ImageTk import PhotoImage
if version_info.major >= 3: # Python3
import tkinter as tk
else: # Python 2
import Tkinter as tk
class SteamWebBrowserTk(SteamWebBrowserCfg):
''' SteamWebBrowserCfg with Tkinter UI for displaying captcha image
'''
@staticmethod
def _handle_captcha(captcha_data, message=''):
tk_root = tk.Tk()
def close(captcha_text):
if captcha_text.get() != '':
tk_root.destroy() # Faster than .quit() and won't be re-used anyways
tk_root.title('')
tk_root.configure(bg='black')
captcha = PhotoImage(data=captcha_data)
tk.Label(tk_root, text=message, bg='black', fg='white').pack()
tk.Label(tk_root, image=captcha, bg='black').pack()
captcha_text = tk.StringVar()
tk.Entry(
tk_root,
textvariable=captcha_text,
bg='black',
fg='white',
insertbackground='white'
).pack()
tk_root.bind('<Return>', lambda s: close(captcha_text))
tk_root.mainloop()
return captcha_text.get()
|
|
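Because the static method is the only UI hook, a non-graphical variant can be swapped in by overriding it; the stdin prompt below is a sketch (captcha_data would have to be shown to the user some other way):

from steamweb.steamwebbrowsertk import SteamWebBrowserTk

class SteamWebBrowserStdin(SteamWebBrowserTk):
    @staticmethod
    def _handle_captcha(captcha_data, message=''):
        # Skips Tk entirely; use raw_input instead of input on Python 2.
        if message:
            print(message)
        return input('Captcha text: ')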
99454f1b62f1770e08c29db727d3b790787a6fa0
|
pcloudpy/core/filters/DisplayNormals.py
|
pcloudpy/core/filters/DisplayNormals.py
|
from vtk import vtkArrowSource, vtkGlyph3D, vtkPolyData
from vtk import vtkRenderer, vtkRenderWindowInteractor, vtkPolyDataMapper, vtkActor, vtkRenderWindow
from .base import FilterBase
class DisplayNormals(FilterBase):
def __init__(self):
super(DisplayNormals, self).__init__()
def set_input(self, input_data):
if isinstance(input_data, vtkPolyData):
super(DisplayNormals, self).set_input(input_data)
return True
else:
return False
def update(self):
# Source for the glyph filter
arrow = vtkArrowSource()
arrow.SetTipResolution(8)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInput(self.input_)
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(0.1)
#glyph.SetColorModeToColorByVector()
#glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
self.output_ = glyph.GetOutput()
|
Add Filter that displays normal vectors (it needs the pre-computed normal vectors)
|
Add Filter that displays normal vectors (it needs the pre-computed normal vectors)
|
Python
|
bsd-3-clause
|
mmolero/pcloudpy
|
Add Filter that displays normal vectors (it needs the pre-computed normal vectors)
|
from vtk import vtkArrowSource, vtkGlyph3D, vtkPolyData
from vtk import vtkRenderer, vtkRenderWindowInteractor, vtkPolyDataMapper, vtkActor, vtkRenderWindow
from .base import FilterBase
class DisplayNormals(FilterBase):
def __init__(self):
super(DisplayNormals, self).__init__()
def set_input(self, input_data):
if isinstance(input_data, vtkPolyData):
super(DisplayNormals, self).set_input(input_data)
return True
else:
return False
def update(self):
# Source for the glyph filter
arrow = vtkArrowSource()
arrow.SetTipResolution(8)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInput(self.input_)
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(0.1)
#glyph.SetColorModeToColorByVector()
#glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
self.output_ = glyph.GetOutput()
|
<commit_before><commit_msg>Add Filter that displays normal vectors (it needs the pre-computed normal vectors)<commit_after>
|
from vtk import vtkArrowSource, vtkGlyph3D, vtkPolyData
from vtk import vtkRenderer, vtkRenderWindowInteractor, vtkPolyDataMapper, vtkActor, vtkRenderWindow
from .base import FilterBase
class DisplayNormals(FilterBase):
def __init__(self):
super(DisplayNormals, self).__init__()
def set_input(self, input_data):
if isinstance(input_data, vtkPolyData):
super(DisplayNormals, self).set_input(input_data)
return True
else:
return False
def update(self):
# Source for the glyph filter
arrow = vtkArrowSource()
arrow.SetTipResolution(8)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInput(self.input_)
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(0.1)
#glyph.SetColorModeToColorByVector()
#glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
self.output_ = glyph.GetOutput()
|
Add Filter that displays normal vectors (it needs the pre-computed normal vectors)from vtk import vtkArrowSource, vtkGlyph3D, vtkPolyData
from vtk import vtkRenderer, vtkRenderWindowInteractor, vtkPolyDataMapper, vtkActor, vtkRenderWindow
from .base import FilterBase
class DisplayNormals(FilterBase):
def __init__(self):
super(DisplayNormals, self).__init__()
def set_input(self, input_data):
if isinstance(input_data, vtkPolyData):
super(DisplayNormals, self).set_input(input_data)
return True
else:
return False
def update(self):
# Source for the glyph filter
arrow = vtkArrowSource()
arrow.SetTipResolution(8)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInput(self.input_)
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(0.1)
#glyph.SetColorModeToColorByVector()
#glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
self.output_ = glyph.GetOutput()
|
<commit_before><commit_msg>Add Filter that displays normal vectors (it needs the pre-computed normal vectors)<commit_after>from vtk import vtkArrowSource, vtkGlyph3D, vtkPolyData
from vtk import vtkRenderer, vtkRenderWindowInteractor, vtkPolyDataMapper, vtkActor, vtkRenderWindow
from .base import FilterBase
class DisplayNormals(FilterBase):
def __init__(self):
super(DisplayNormals, self).__init__()
def set_input(self, input_data):
if isinstance(input_data, vtkPolyData):
super(DisplayNormals, self).set_input(input_data)
return True
else:
return False
def update(self):
# Source for the glyph filter
arrow = vtkArrowSource()
arrow.SetTipResolution(8)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInput(self.input_)
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(0.1)
#glyph.SetColorModeToColorByVector()
#glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
self.output_ = glyph.GetOutput()
|
|
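A usage sketch; the reader and file name are placeholders, and the input is assumed to already carry point normals (which, as the commit message notes, are required):

from vtk import vtkXMLPolyDataReader
from pcloudpy.core.filters.DisplayNormals import DisplayNormals

reader = vtkXMLPolyDataReader()
reader.SetFileName('cloud_with_normals.vtp')  # placeholder path
reader.Update()

f = DisplayNormals()
if f.set_input(reader.GetOutput()):
    f.update()
    arrows = f.output_  # glyph polydata produced by update()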
b16ac90d152bc4883162df235b837726c60ce94f
|
dacapo_analyzer.py
|
dacapo_analyzer.py
|
import re
BENCHMARKS = set(( 'avrora'
, 'batik'
, 'eclipse'
, 'fop'
, 'h2'
, 'jython'
, 'luindex'
, 'lusearch'
, 'pmd'
, 'sunflow'
, 'tomcat'
, 'tradebeans'
, 'tradesoap'
, 'xalan'))
WALLCLOCK_RE = re.compile(r'((?P<succeed>FAILED|PASSED) in (?P<time>\d+) msec)')
def dacapo_wallclock(output):
"""
:param output: benchmark output
:returns: list of relevant parts for wallclock time
:rtype: list of tuples as (whole relevant part, PASSED/FAILED, time in msec)
"""
return WALLCLOCK_RE.findall(output)
|
Add basic dacapo wallclock analyzer.
|
[client] Add basic dacapo wallclock analyzer.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com>
|
Python
|
mit
|
fhirschmann/penchy,fhirschmann/penchy
|
[client] Add basic dacapo wallclock analyzer.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com>
|
import re
BENCHMARKS = set(( 'avrora'
, 'batik'
, 'eclipse'
, 'fop'
, 'h2'
, 'jython'
, 'luindex'
, 'lusearch'
, 'pmd'
, 'sunflow'
, 'tomcat'
, 'tradebeans'
, 'tradesoap'
, 'xalan'))
WALLCLOCK_RE = re.compile(r'((?P<succed>FAILED|PASSED) in (?P<time>\d+) msec)')
def dacapo_wallclock(output):
"""
:param output: benchmark output
:returns: list of relevant parts for wallclock time
:rtype: list of tuples as (whole relevant part, PASSED/FAILED, time in msec)
"""
return WALLCLOCK_RE.findall(output)
|
<commit_before><commit_msg>[client] Add basic dacapo wallclock analyzer.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com><commit_after>
|
import re
BENCHMARKS = set(( 'avrora'
, 'batik'
, 'eclipse'
, 'fop'
, 'h2'
, 'jython'
, 'luindex'
, 'lusearch'
, 'pmd'
, 'sunflow'
, 'tomcat'
, 'tradebeans'
, 'tradesoap'
, 'xalan'))
WALLCLOCK_RE = re.compile(r'((?P<succed>FAILED|PASSED) in (?P<time>\d+) msec)')
def dacapo_wallclock(output):
"""
:param output: benchmark output
:returns: list of relevant parts for wallclock time
:rtype: list of tuples as (whole relevant part, PASSED/FAILED, time in msec)
"""
return WALLCLOCK_RE.findall(output)
|
[client] Add basic dacapo wallclock analyzer.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com>import re
BENCHMARKS = set(( 'avrora'
, 'batik'
, 'eclipse'
, 'fop'
, 'h2'
, 'jython'
, 'luindex'
, 'lusearch'
, 'pmd'
, 'sunflow'
, 'tomcat'
, 'tradebeans'
, 'tradesoap'
, 'xalan'))
WALLCLOCK_RE = re.compile(r'((?P<succed>FAILED|PASSED) in (?P<time>\d+) msec)')
def dacapo_wallclock(output):
"""
:param output: benchmark output
:returns: list of relevant parts for wallclock time
:rtype: list of tuples as (whole relevant part, PASSED/FAILED, time in msec)
"""
return WALLCLOCK_RE.findall(output)
|
<commit_before><commit_msg>[client] Add basic dacapo wallclock analyzer.
Signed-off-by: Michael Markert <5eb998b7ac86da375651a4cd767b88c9dad25896@googlemail.com><commit_after>import re
BENCHMARKS = set(( 'avrora'
, 'batik'
, 'eclipse'
, 'fop'
, 'h2'
, 'jython'
, 'luindex'
, 'lusearch'
, 'pmd'
, 'sunflow'
, 'tomcat'
, 'tradebeans'
, 'tradesoap'
, 'xalan'))
WALLCLOCK_RE = re.compile(r'((?P<succed>FAILED|PASSED) in (?P<time>\d+) msec)')
def dacapo_wallclock(output):
"""
:param output: benchmark output
:returns: list of relevant parts for wallclock time
:rtype: list of tuples as (whole relevant part, PASSED/FAILED, time in msec)
"""
return WALLCLOCK_RE.findall(output)
|
|
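Editor's aside (not part of the record above): a quick sketch of what WALLCLOCK_RE extracts. Because the pattern contains three groups (the whole span, the PASSED/FAILED flag, the time), findall returns 3-tuples. The sample line is invented, not real DaCapo output.
import re
WALLCLOCK_RE = re.compile(r'((?P<succed>FAILED|PASSED) in (?P<time>\d+) msec)')
sample = "===== DaCapo avrora PASSED in 4242 msec ====="  # invented sample line
print(WALLCLOCK_RE.findall(sample))
# [('PASSED in 4242 msec', 'PASSED', '4242')]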
f0e180387a37437fe7e8d37fa2806e7d47736bfc
|
pyheufybot/bothandler.py
|
pyheufybot/bothandler.py
|
import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
self.globalConfig.loadConfig(None)
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
|
import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
if not self.globalConfig.loadConfig(None):
return
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
|
Make sure the application doesn't continue without a config
|
Make sure the application doesn't continue without a config
|
Python
|
mit
|
Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot
|
import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
self.globalConfig.loadConfig(None)
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
Make sure the application doesn't continue without a config
|
import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
if not self.globalConfig.loadConfig(None):
return
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
|
<commit_before>import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
self.globalConfig.loadConfig(None)
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
<commit_msg>Make sure the application doesn't continue without a config<commit_after>
|
import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
if not self.globalConfig.loadConfig(None):
return
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
|
import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
self.globalConfig.loadConfig(None)
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
Make sure the application doesn't continue without a configimport os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
if not self.globalConfig.loadConfig(None):
return
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
|
<commit_before>import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
self.globalConfig.loadConfig(None)
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
<commit_msg>Make sure the application doesn't continue without a config<commit_after>import os
from twisted.internet import reactor
from heufybot import HeufyBot, HeufyBotFactory
from config import Config
class BotHandler(object):
factories = {}
globalConfig = None
def __init__(self):
print "--- Loading configs..."
self.globalConfig = Config("globalconfig.yml")
if not self.globalConfig.loadConfig(None):
return
configList = self.getConfigList()
if len(configList) == 0:
print "WARNING: No server configs found. Using the global config instead."
else:
for filename in self.getConfigList():
                config = Config(filename, self.globalConfig.settings)
def getConfigList(self):
root = os.path.join("config")
configs = []
for item in os.listdir(root):
if not os.path.isfile(os.path.join(root, item)):
continue
if not item.endswith(".yml"):
continue
if item == "globalconfig.yml":
continue
configs.append(item)
return configs
if __name__ == "__main__":
# Create folders
if not os.path.exists(os.path.join("config")):
os.makedirs("config")
handler = BotHandler()
|
8fc31b8a5c36467ca44f88fedaf0e9f6b47bade9
|
genealogio/migrations/0025_auto_20160316_2247.py
|
genealogio/migrations/0025_auto_20160316_2247.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0024_auto_20160316_2039'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ('date', 'event_type'), 'verbose_name': 'Ereignis', 'verbose_name_plural': 'Ereignisse'},
),
]
|
Sort events by date, add missing migration
|
Sort events by date, add missing migration
|
Python
|
bsd-3-clause
|
ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio
|
Sort events by date, add missing migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0024_auto_20160316_2039'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ('date', 'event_type'), 'verbose_name': 'Ereignis', 'verbose_name_plural': 'Ereignisse'},
),
]
|
<commit_before><commit_msg>Sort events by date, add missing migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0024_auto_20160316_2039'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ('date', 'event_type'), 'verbose_name': 'Ereignis', 'verbose_name_plural': 'Ereignisse'},
),
]
|
Sort events by date, add missing migration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0024_auto_20160316_2039'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ('date', 'event_type'), 'verbose_name': 'Ereignis', 'verbose_name_plural': 'Ereignisse'},
),
]
|
<commit_before><commit_msg>Sort events by date, add missing migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('genealogio', '0024_auto_20160316_2039'),
]
operations = [
migrations.AlterModelOptions(
name='event',
options={'ordering': ('date', 'event_type'), 'verbose_name': 'Ereignis', 'verbose_name_plural': 'Ereignisse'},
),
]
|
|
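Editor's aside (not part of the record above): AlterModelOptions mirrors a change made on the model's Meta class. A sketch of the Event Meta that this migration corresponds to; the model body is hypothetical and only the options shown in the migration are assumed.
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Event(models.Model):
    # 'date' and 'event_type' fields are assumed to exist on the real model
    class Meta:
        ordering = ('date', 'event_type')
        verbose_name = _('Ereignis')
        verbose_name_plural = _('Ereignisse')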
a2708fcbf836c9ecb9efc546606ce6011a08f15a
|
firs_test_add_group.py
|
firs_test_add_group.py
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class firs_test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_firs_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_id("container").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group1")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("logo")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("comment")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Add test by selenium builder
|
Add test by selenium builder
|
Python
|
apache-2.0
|
maximatorrus/automated_testing_python
|
Add test by selenium builder
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class firs_test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_firs_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_id("container").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group1")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("logo")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("comment")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test by selenium builder<commit_after>
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class firs_test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_firs_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_id("container").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group1")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("logo")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("comment")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Add test by selenium builder# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class firs_test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_firs_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_id("container").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group1")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("logo")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("comment")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test by selenium builder<commit_after># -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class firs_test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_firs_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_id("container").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("group1")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("logo")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("comment")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
|
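Editor's aside (not part of the record above): the "\undefined" values are what Selenium Builder emits when a recorded variable was never bound, which is why the script types that literal string into the fields. A sketch of the usual fix, parametrizing the login steps; the credentials are placeholders, not values from the record.
def login(wd, username, password):
    # same element locators as the recorded script
    wd.get("http://localhost/addressbook/")
    wd.find_element_by_name("user").send_keys(username)
    wd.find_element_by_name("pass").send_keys(password)
    wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
# login(self.wd, "admin", "secret")  # placeholder credentials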
a387c0307f55d635d4a6064a3df2e77ecf9e1157
|
st2common/tests/unit/test_util_types.py
|
st2common/tests/unit/test_util_types.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.types import OrderedSet
__all__ = [
'OrderedTestTypeTestCase'
]
class OrderedTestTypeTestCase(unittest2.TestCase):
def test_ordered_set(self):
set1 = OrderedSet([1, 2, 3, 3, 4, 2, 1, 5])
self.assertEqual(set1, [1, 2, 3, 4, 5])
set2 = OrderedSet([5, 4, 3, 2, 1])
self.assertEqual(set2, [5, 4, 3, 2, 1])
set3 = OrderedSet([1, 2, 3, 4, 5, 5, 4, 3, 2, 1])
self.assertEqual(set3, [1, 2, 3, 4, 5])
set4 = OrderedSet([1, 1, 1, 1, 4, 4, 4, 9])
self.assertEqual(set4, [1, 4, 9])
|
Add tests for OrderedSet type.
|
Add tests for OrderedSet type.
|
Python
|
apache-2.0
|
Plexxi/st2,StackStorm/st2,nzlosh/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2
|
Add tests for OrderedSet type.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.types import OrderedSet
__all__ = [
'OrderedTestTypeTestCase'
]
class OrderedTestTypeTestCase(unittest2.TestCase):
def test_ordered_set(self):
set1 = OrderedSet([1, 2, 3, 3, 4, 2, 1, 5])
self.assertEqual(set1, [1, 2, 3, 4, 5])
set2 = OrderedSet([5, 4, 3, 2, 1])
self.assertEqual(set2, [5, 4, 3, 2, 1])
set3 = OrderedSet([1, 2, 3, 4, 5, 5, 4, 3, 2, 1])
self.assertEqual(set3, [1, 2, 3, 4, 5])
set4 = OrderedSet([1, 1, 1, 1, 4, 4, 4, 9])
self.assertEqual(set4, [1, 4, 9])
|
<commit_before><commit_msg>Add tests for OrderedSet type.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.types import OrderedSet
__all__ = [
'OrderedTestTypeTestCase'
]
class OrderedTestTypeTestCase(unittest2.TestCase):
def test_ordered_set(self):
set1 = OrderedSet([1, 2, 3, 3, 4, 2, 1, 5])
self.assertEqual(set1, [1, 2, 3, 4, 5])
set2 = OrderedSet([5, 4, 3, 2, 1])
self.assertEqual(set2, [5, 4, 3, 2, 1])
set3 = OrderedSet([1, 2, 3, 4, 5, 5, 4, 3, 2, 1])
self.assertEqual(set3, [1, 2, 3, 4, 5])
set4 = OrderedSet([1, 1, 1, 1, 4, 4, 4, 9])
self.assertEqual(set4, [1, 4, 9])
|
Add tests for OrderedSet type.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.types import OrderedSet
__all__ = [
'OrderedTestTypeTestCase'
]
class OrderedTestTypeTestCase(unittest2.TestCase):
def test_ordered_set(self):
set1 = OrderedSet([1, 2, 3, 3, 4, 2, 1, 5])
self.assertEqual(set1, [1, 2, 3, 4, 5])
set2 = OrderedSet([5, 4, 3, 2, 1])
self.assertEqual(set2, [5, 4, 3, 2, 1])
set3 = OrderedSet([1, 2, 3, 4, 5, 5, 4, 3, 2, 1])
self.assertEqual(set3, [1, 2, 3, 4, 5])
set4 = OrderedSet([1, 1, 1, 1, 4, 4, 4, 9])
self.assertEqual(set4, [1, 4, 9])
|
<commit_before><commit_msg>Add tests for OrderedSet type.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.types import OrderedSet
__all__ = [
'OrderedTestTypeTestCase'
]
class OrderedTestTypeTestCase(unittest2.TestCase):
def test_ordered_set(self):
set1 = OrderedSet([1, 2, 3, 3, 4, 2, 1, 5])
self.assertEqual(set1, [1, 2, 3, 4, 5])
set2 = OrderedSet([5, 4, 3, 2, 1])
self.assertEqual(set2, [5, 4, 3, 2, 1])
set3 = OrderedSet([1, 2, 3, 4, 5, 5, 4, 3, 2, 1])
self.assertEqual(set3, [1, 2, 3, 4, 5])
set4 = OrderedSet([1, 1, 1, 1, 4, 4, 4, 9])
self.assertEqual(set4, [1, 4, 9])
|
|
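Editor's aside (not part of the record above): the assertions compare OrderedSet instances against plain lists, so the type under test must deduplicate while preserving first-seen order and still compare equal to a list. A minimal sketch with those semantics; this is illustrative only, not the actual st2common.util.types implementation.
class OrderedSet(list):
    """List subclass that drops duplicates but keeps first-seen order."""
    def __init__(self, iterable=()):
        super(OrderedSet, self).__init__()
        seen = set()
        for item in iterable:
            if item not in seen:
                seen.add(item)
                self.append(item)
assert OrderedSet([1, 2, 3, 3, 4, 2, 1, 5]) == [1, 2, 3, 4, 5]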
906440692ae179c5ce4ee8211b13121e025c2651
|
ideascube/conf/idb_de_dusseldorf.py
|
ideascube/conf/idb_de_dusseldorf.py
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Welcome Point, Dusseldorf"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Welcome Point Dusseldorf"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['DE']
TIME_ZONE = None
LANGUAGE_CODE = 'de'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['de', 'ar', 'fa', 'en']
},
{
'id': 'w2eu',
}
]
|
Add conf file for Welcome Point Dusseldorf, DE
|
Add conf file for Welcome Point Dusseldorf, DE
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for Welcome Point Dusseldorf, DE
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Welcome Point, Dusseldorf"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Welcome Point Dusseldorf"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['DE']
TIME_ZONE = None
LANGUAGE_CODE = 'de'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['de', 'ar', 'fa', 'en']
},
{
'id': 'w2eu',
}
]
|
<commit_before><commit_msg>Add conf file for Welcome Point Dusseldorf, DE<commit_after>
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Welcome Point, Dusseldorf"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Welcome Point Dusseldorf"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['DE']
TIME_ZONE = None
LANGUAGE_CODE = 'de'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['de', 'ar', 'fa', 'en']
},
{
'id': 'w2eu',
}
]
|
Add conf file for Welcome Point Dusseldorf, DE# -*- coding: utf-8 -*-
"""Ideaxbox for Welcome Point, Dusseldorf"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Welcome Point Dusseldorf"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['DE']
TIME_ZONE = None
LANGUAGE_CODE = 'de'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['de', 'ar', 'fa', 'en']
},
{
'id': 'w2eu',
}
]
|
<commit_before><commit_msg>Add conf file for Welcome Point Dusseldorf, DE<commit_after># -*- coding: utf-8 -*-
"""Ideaxbox for Welcome Point, Dusseldorf"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Welcome Point Dusseldorf"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['DE']
TIME_ZONE = None
LANGUAGE_CODE = 'de'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['de', 'ar', 'fa', 'en']
},
{
'id': 'w2eu',
}
]
|
|
3849b0a5cae5ca7d214c864fa1722a9da98a3ec2
|
indra/sources/minerva/id_mapping.py
|
indra/sources/minerva/id_mapping.py
|
from indra.databases import chebi_client
from indra.ontology.standardize import standardize_db_refs
minerva_to_indra_map = {
'UNIPROT': 'UP',
'REFSEQ': 'REFSEQ_PROT',
'ENTREZ': 'EGID',
'INTERPRO': 'IP',
}
def fix_id_standards(db_ns, db_id):
if db_ns == 'CHEBI':
if not db_id.startswith('CHEBI:'):
db_id = f'CHEBI:{db_id}'
db_id = chebi_client.get_primary_id(db_id)
elif db_ns == 'HGNC' and db_id.startswith('HGNC:'):
db_id = db_id[5:]
return db_ns, db_id
def indra_db_refs_from_minerva_refs(refs):
db_refs = {}
for db_ns, db_id in refs:
db_ns = minerva_to_indra_map[db_ns] \
if db_ns in minerva_to_indra_map else db_ns
        db_ns, db_id = fix_id_standards(db_ns, db_id)
db_refs[db_ns] = db_id
db_refs = standardize_db_refs(db_refs)
return db_refs
|
Move relevant parts of ID mapping from covid-19
|
Move relevant parts of ID mapping from covid-19
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,sorgerlab/indra,bgyori/indra,bgyori/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,sorgerlab/indra,johnbachman/indra,bgyori/indra,johnbachman/indra,sorgerlab/belpy
|
Move relevant parts of ID mapping from covid-19
|
from indra.databases import chebi_client
from indra.ontology.standardize import standardize_db_refs
minerva_to_indra_map = {
'UNIPROT': 'UP',
'REFSEQ': 'REFSEQ_PROT',
'ENTREZ': 'EGID',
'INTERPRO': 'IP',
}
def fix_id_standards(db_ns, db_id):
if db_ns == 'CHEBI':
if not db_id.startswith('CHEBI:'):
db_id = f'CHEBI:{db_id}'
db_id = chebi_client.get_primary_id(db_id)
elif db_ns == 'HGNC' and db_id.startswith('HGNC:'):
db_id = db_id[5:]
return db_ns, db_id
def indra_db_refs_from_minerva_refs(refs):
db_refs = {}
for db_ns, db_id in refs:
db_ns = minerva_to_indra_map[db_ns] \
if db_ns in minerva_to_indra_map else db_ns
        db_ns, db_id = fix_id_standards(db_ns, db_id)
db_refs[db_ns] = db_id
db_refs = standardize_db_refs(db_refs)
return db_refs
|
<commit_before><commit_msg>Move relevant parts of ID mapping from covid-19<commit_after>
|
from indra.databases import chebi_client
from indra.ontology.standardize import standardize_db_refs
minerva_to_indra_map = {
'UNIPROT': 'UP',
'REFSEQ': 'REFSEQ_PROT',
'ENTREZ': 'EGID',
'INTERPRO': 'IP',
}
def fix_id_standards(db_ns, db_id):
if db_ns == 'CHEBI':
if not db_id.startswith('CHEBI:'):
db_id = f'CHEBI:{db_id}'
db_id = chebi_client.get_primary_id(db_id)
elif db_ns == 'HGNC' and db_id.startswith('HGNC:'):
db_id = db_id[5:]
return db_ns, db_id
def indra_db_refs_from_minerva_refs(refs):
db_refs = {}
for db_ns, db_id in refs:
db_ns = minerva_to_indra_map[db_ns] \
if db_ns in minerva_to_indra_map else db_ns
        db_ns, db_id = fix_id_standards(db_ns, db_id)
db_refs[db_ns] = db_id
db_refs = standardize_db_refs(db_refs)
return db_refs
|
Move relevant parts of ID mapping from covid-19from indra.databases import chebi_client
from indra.ontology.standardize import standardize_db_refs
minerva_to_indra_map = {
'UNIPROT': 'UP',
'REFSEQ': 'REFSEQ_PROT',
'ENTREZ': 'EGID',
'INTERPRO': 'IP',
}
def fix_id_standards(db_ns, db_id):
if db_ns == 'CHEBI':
if not db_id.startswith('CHEBI:'):
db_id = f'CHEBI:{db_id}'
db_id = chebi_client.get_primary_id(db_id)
elif db_ns == 'HGNC' and db_id.startswith('HGNC:'):
db_id = db_id[5:]
return db_ns, db_id
def indra_db_refs_from_minerva_refs(refs):
db_refs = {}
for db_ns, db_id in refs:
db_ns = minerva_to_indra_map[db_ns] \
if db_ns in minerva_to_indra_map else db_ns
        db_ns, db_id = fix_id_standards(db_ns, db_id)
db_refs[db_ns] = db_id
db_refs = standardize_db_refs(db_refs)
return db_refs
|
<commit_before><commit_msg>Move relevant parts of ID mapping from covid-19<commit_after>from indra.databases import chebi_client
from indra.ontology.standardize import standardize_db_refs
minerva_to_indra_map = {
'UNIPROT': 'UP',
'REFSEQ': 'REFSEQ_PROT',
'ENTREZ': 'EGID',
'INTERPRO': 'IP',
}
def fix_id_standards(db_ns, db_id):
if db_ns == 'CHEBI':
if not db_id.startswith('CHEBI:'):
db_id = f'CHEBI:{db_id}'
db_id = chebi_client.get_primary_id(db_id)
elif db_ns == 'HGNC' and db_id.startswith('HGNC:'):
db_id = db_id[5:]
return db_ns, db_id
def indra_db_refs_from_minerva_refs(refs):
db_refs = {}
for db_ns, db_id in refs:
db_ns = minerva_to_indra_map[db_ns] \
if db_ns in minerva_to_indra_map else db_ns
        db_ns, db_id = fix_id_standards(db_ns, db_id)
db_refs[db_ns] = db_id
db_refs = standardize_db_refs(db_refs)
return db_refs
|
|
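Editor's aside (not part of the record above): a sketch of how indra_db_refs_from_minerva_refs consumes (namespace, id) pairs. The identifiers below are invented; 'UNIPROT' and 'ENTREZ' get remapped to 'UP' and 'EGID' via minerva_to_indra_map, and the bare CHEBI id gains its 'CHEBI:' prefix before primary-id lookup. The final dict depends on standardize_db_refs and the CHEBI client, so no exact output is claimed.
from indra.sources.minerva.id_mapping import indra_db_refs_from_minerva_refs
refs = [('UNIPROT', 'P12345'), ('ENTREZ', '1234'), ('CHEBI', '15377')]  # invented ids
db_refs = indra_db_refs_from_minerva_refs(refs)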
fa8099ebbf06fc45e62d8b1ed6a12b5a2405476c
|
stdnum/imo.py
|
stdnum/imo.py
|
# imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
number = compact(number)
if not number.isdigit():
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
return 'IMO ' + compact(number)
|
Add int. maritime org. number (IMO)
|
Add int. maritime org. number (IMO)
This adds checks for the International Maritime Organization number used
to identify ships. However, there seem to be a lot of ships with an IMO
number that does not follow these rules (different check digits or even
length).
|
Python
|
lgpl-2.1
|
holvi/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,arthurdejong/python-stdnum
|
Add int. maritime org. number (IMO)
This adds checks for the International Maritime Organization number used
to identify ships. However, there seem to be a lot of ships with an IMO
number that does not follow these rules (different check digits or even
length).
|
# imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
number = compact(number)
if not number.isdigit():
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
return 'IMO ' + compact(number)
|
<commit_before><commit_msg>Add int. maritime org. number (IMO)
This adds checks for the International Maritime Organization number used
to identify ships. However, there seem to be a lot of ships with an IMO
number that does not follow these rules (different check digits or even
length).<commit_after>
|
# imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
number = compact(number)
if not number.isdigit():
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
return 'IMO ' + compact(number)
|
Add int. maritime org. number (IMO)
This adds checks for the International Maritime Organization number used
to identify ships. However, there seem to be a lot of ships with an IMO
number that does not follow these rules (different check digits or even
length).# imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
number = compact(number)
if not number.isdigit():
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
return 'IMO ' + compact(number)
|
<commit_before><commit_msg>Add int. maritime org. number (IMO)
This adds checks for the International Maritime Organization number used
to identify ships. However, there seem to be a lot of ships with an IMO
number that does not follow these rules (different check digits or even
length).<commit_after># imo.py - functions for handling IMO numbers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""IMO number (International Maritime Organization number).
A number used to uniquely identify ships (the hull) for purposes of
registering owners and management companies. The ship identification number
consists of a six-digit sequentially assigned number and a check digit. The
number is usually prefixed with "IMO".
Note that there seem to be a large number of ships with an IMO that does not
have a valid check digit or even have a different length.
>>> validate('IMO 9319466')
'9319466'
>>> validate('IMO 8814275')
'8814275'
>>> validate('8814274')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('8814275')
'IMO 8814275'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' ').upper().strip()
if number.startswith('IMO'):
number = number[3:]
return number
def calc_check_digit(number):
"""Calculate the check digits for the number."""
return str(sum(int(n) * (7 - i) for i, n in enumerate(number[:6])) % 10)
def validate(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
number = compact(number)
if not number.isdigit():
raise InvalidFormat()
if len(number) != 7:
raise InvalidLength()
if calc_check_digit(number[:-1]) != number[-1]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is valid. This checks the length
and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
return 'IMO ' + compact(number)
|
|
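Editor's aside (not part of the record above): the check digit is the weighted sum of the first six digits with weights 7 down to 2, taken mod 10. Worked through for the docstring's own example 8814275:
# digits : 8    8    1    4    2    7
# weights: 7    6    5    4    3    2
# sum    : 56 + 48 + 5 + 16 + 6 + 14 = 145, and 145 % 10 = 5
# so 881427 gets check digit 5, giving the full number 8814275
assert sum(int(n) * (7 - i) for i, n in enumerate('881427')) % 10 == 5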
1965291f9e67b1b7923353f1b8892e0fabb543d4
|
dyngraph/cycledetect_ui.py
|
dyngraph/cycledetect_ui.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\cycledetect.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CycleDetection(object):
def setupUi(self, CycleDetection):
CycleDetection.setObjectName(_fromUtf8("CycleDetection"))
CycleDetection.resize(387, 371)
self.verticalLayout = QtGui.QVBoxLayout(CycleDetection)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.table = QtGui.QTableWidget(CycleDetection)
self.table.setColumnCount(2)
self.table.setObjectName(_fromUtf8("table"))
self.table.setRowCount(0)
self.verticalLayout.addWidget(self.table)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.okButton = QtGui.QPushButton(CycleDetection)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.horizontalLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton(CycleDetection)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(CycleDetection)
QtCore.QMetaObject.connectSlotsByName(CycleDetection)
def retranslateUi(self, CycleDetection):
CycleDetection.setWindowTitle(_translate("CycleDetection", "Cycle Detection", None))
self.okButton.setText(_translate("CycleDetection", "Ok", None))
self.cancelButton.setText(_translate("CycleDetection", "Cancel", None))
|
Add compiled cycle detect ui.
|
Add compiled cycle detect ui.
|
Python
|
isc
|
jaj42/dyngraph,jaj42/GraPhysio,jaj42/GraPhysio
|
Add compiled cycle detect ui.
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\cycledetect.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CycleDetection(object):
def setupUi(self, CycleDetection):
CycleDetection.setObjectName(_fromUtf8("CycleDetection"))
CycleDetection.resize(387, 371)
self.verticalLayout = QtGui.QVBoxLayout(CycleDetection)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.table = QtGui.QTableWidget(CycleDetection)
self.table.setColumnCount(2)
self.table.setObjectName(_fromUtf8("table"))
self.table.setRowCount(0)
self.verticalLayout.addWidget(self.table)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.okButton = QtGui.QPushButton(CycleDetection)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.horizontalLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton(CycleDetection)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(CycleDetection)
QtCore.QMetaObject.connectSlotsByName(CycleDetection)
def retranslateUi(self, CycleDetection):
CycleDetection.setWindowTitle(_translate("CycleDetection", "Cycle Detection", None))
self.okButton.setText(_translate("CycleDetection", "Ok", None))
self.cancelButton.setText(_translate("CycleDetection", "Cancel", None))
|
<commit_before><commit_msg>Add compiled cycle detect ui.<commit_after>
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\cycledetect.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CycleDetection(object):
def setupUi(self, CycleDetection):
CycleDetection.setObjectName(_fromUtf8("CycleDetection"))
CycleDetection.resize(387, 371)
self.verticalLayout = QtGui.QVBoxLayout(CycleDetection)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.table = QtGui.QTableWidget(CycleDetection)
self.table.setColumnCount(2)
self.table.setObjectName(_fromUtf8("table"))
self.table.setRowCount(0)
self.verticalLayout.addWidget(self.table)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.okButton = QtGui.QPushButton(CycleDetection)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.horizontalLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton(CycleDetection)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(CycleDetection)
QtCore.QMetaObject.connectSlotsByName(CycleDetection)
def retranslateUi(self, CycleDetection):
CycleDetection.setWindowTitle(_translate("CycleDetection", "Cycle Detection", None))
self.okButton.setText(_translate("CycleDetection", "Ok", None))
self.cancelButton.setText(_translate("CycleDetection", "Cancel", None))
|
Add compiled cycle detect ui.# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\cycledetect.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CycleDetection(object):
def setupUi(self, CycleDetection):
CycleDetection.setObjectName(_fromUtf8("CycleDetection"))
CycleDetection.resize(387, 371)
self.verticalLayout = QtGui.QVBoxLayout(CycleDetection)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.table = QtGui.QTableWidget(CycleDetection)
self.table.setColumnCount(2)
self.table.setObjectName(_fromUtf8("table"))
self.table.setRowCount(0)
self.verticalLayout.addWidget(self.table)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.okButton = QtGui.QPushButton(CycleDetection)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.horizontalLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton(CycleDetection)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(CycleDetection)
QtCore.QMetaObject.connectSlotsByName(CycleDetection)
def retranslateUi(self, CycleDetection):
CycleDetection.setWindowTitle(_translate("CycleDetection", "Cycle Detection", None))
self.okButton.setText(_translate("CycleDetection", "Ok", None))
self.cancelButton.setText(_translate("CycleDetection", "Cancel", None))
|
<commit_before><commit_msg>Add compiled cycle detect ui.<commit_after># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\cycledetect.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CycleDetection(object):
def setupUi(self, CycleDetection):
CycleDetection.setObjectName(_fromUtf8("CycleDetection"))
CycleDetection.resize(387, 371)
self.verticalLayout = QtGui.QVBoxLayout(CycleDetection)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.table = QtGui.QTableWidget(CycleDetection)
self.table.setColumnCount(2)
self.table.setObjectName(_fromUtf8("table"))
self.table.setRowCount(0)
self.verticalLayout.addWidget(self.table)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.okButton = QtGui.QPushButton(CycleDetection)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.horizontalLayout.addWidget(self.okButton)
self.cancelButton = QtGui.QPushButton(CycleDetection)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout.addWidget(self.cancelButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(CycleDetection)
QtCore.QMetaObject.connectSlotsByName(CycleDetection)
def retranslateUi(self, CycleDetection):
CycleDetection.setWindowTitle(_translate("CycleDetection", "Cycle Detection", None))
self.okButton.setText(_translate("CycleDetection", "Ok", None))
self.cancelButton.setText(_translate("CycleDetection", "Cancel", None))
|
|
676fd73fa8c6b5a70e5caef6d6b195e4676a0a18
|
tests/test_prefix_q_expression.py
|
tests/test_prefix_q_expression.py
|
from unittest import TestCase
from django.db.models import Q
from binder.views import prefix_q_expression
from binder.permissions.views import is_q_child_equal
from .testapp.models import Animal
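# prefix_q_expression prepends the prefix to each lookup; lookups on the antiprefix
# relation ('animals') are rewritten against the related model (bare lookups become pk).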
class TestPrefixQExpression(TestCase):
def test_simple_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1), 'prefix'),
Q(prefix__foo=1),
))
def test_nested_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1) & ~Q(bar=2) | Q(baz=3), 'prefix'),
Q(prefix__foo=1) & ~Q(prefix__bar=2) | Q(prefix__baz=3),
))
def test_prefix_identity(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(pk__in=[]), 'prefix'),
Q(pk__in=[]),
))
def test_antiprefix_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__name='Bokito'), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', name='Bokito'),
))
def test_antiprefix_no_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_pk(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__pk=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_modifier(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__in=[1, 2, 3]), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk__in=[1, 2, 3]),
))
|
Add tests for prefix Q expression
|
Add tests for prefix Q expression
|
Python
|
mit
|
CodeYellowBV/django-binder
|
Add tests for prefix Q expression
|
from unittest import TestCase
from django.db.models import Q
from binder.views import prefix_q_expression
from binder.permissions.views import is_q_child_equal
from .testapp.models import Animal
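# prefix_q_expression prepends the prefix to each lookup; lookups on the antiprefix
# relation ('animals') are rewritten against the related model (bare lookups become pk).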
class TestPrefixQExpression(TestCase):
def test_simple_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1), 'prefix'),
Q(prefix__foo=1),
))
def test_nested_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1) & ~Q(bar=2) | Q(baz=3), 'prefix'),
Q(prefix__foo=1) & ~Q(prefix__bar=2) | Q(prefix__baz=3),
))
def test_prefix_identity(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(pk__in=[]), 'prefix'),
Q(pk__in=[]),
))
def test_antiprefix_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__name='Bokito'), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', name='Bokito'),
))
def test_antiprefix_no_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_pk(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__pk=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_modifier(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__in=[1, 2, 3]), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk__in=[1, 2, 3]),
))
|
<commit_before><commit_msg>Add tests for prefix Q expression<commit_after>
|
from unittest import TestCase
from django.db.models import Q
from binder.views import prefix_q_expression
from binder.permissions.views import is_q_child_equal
from .testapp.models import Animal
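# prefix_q_expression prepends the prefix to each lookup; lookups on the antiprefix
# relation ('animals') are rewritten against the related model (bare lookups become pk).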
class TestPrefixQExpression(TestCase):
def test_simple_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1), 'prefix'),
Q(prefix__foo=1),
))
def test_nested_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1) & ~Q(bar=2) | Q(baz=3), 'prefix'),
Q(prefix__foo=1) & ~Q(prefix__bar=2) | Q(prefix__baz=3),
))
def test_prefix_identity(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(pk__in=[]), 'prefix'),
Q(pk__in=[]),
))
def test_antiprefix_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__name='Bokito'), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', name='Bokito'),
))
def test_antiprefix_no_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_pk(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__pk=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_modifier(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__in=[1, 2, 3]), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk__in=[1, 2, 3]),
))
|
Add tests for prefix Q expressionfrom unittest import TestCase
from django.db.models import Q
from binder.views import prefix_q_expression
from binder.permissions.views import is_q_child_equal
from .testapp.models import Animal
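# prefix_q_expression prepends the prefix to each lookup; lookups on the antiprefix
# relation ('animals') are rewritten against the related model (bare lookups become pk).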
class TestPrefixQExpression(TestCase):
def test_simple_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1), 'prefix'),
Q(prefix__foo=1),
))
def test_nested_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1) & ~Q(bar=2) | Q(baz=3), 'prefix'),
Q(prefix__foo=1) & ~Q(prefix__bar=2) | Q(prefix__baz=3),
))
def test_prefix_identity(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(pk__in=[]), 'prefix'),
Q(pk__in=[]),
))
def test_antiprefix_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__name='Bokito'), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', name='Bokito'),
))
def test_antiprefix_no_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_pk(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__pk=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_modifier(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__in=[1, 2, 3]), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk__in=[1, 2, 3]),
))
|
<commit_before><commit_msg>Add tests for prefix Q expression<commit_after>from unittest import TestCase
from django.db.models import Q
from binder.views import prefix_q_expression
from binder.permissions.views import is_q_child_equal
from .testapp.models import Animal
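# prefix_q_expression prepends the prefix to each lookup; lookups on the antiprefix
# relation ('animals') are rewritten against the related model (bare lookups become pk).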
class TestPrefixQExpression(TestCase):
def test_simple_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1), 'prefix'),
Q(prefix__foo=1),
))
def test_nested_prefix(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(foo=1) & ~Q(bar=2) | Q(baz=3), 'prefix'),
Q(prefix__foo=1) & ~Q(prefix__bar=2) | Q(prefix__baz=3),
))
def test_prefix_identity(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(pk__in=[]), 'prefix'),
Q(pk__in=[]),
))
def test_antiprefix_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__name='Bokito'), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', name='Bokito'),
))
def test_antiprefix_no_field(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_pk(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__pk=1), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk=1),
))
def test_antiprefix_modifier(self):
self.assertTrue(is_q_child_equal(
prefix_q_expression(Q(name='Apenheul', animals__in=[1, 2, 3]), 'zoo', 'animals', Animal),
Q(zoo__name='Apenheul', pk__in=[1, 2, 3]),
))
|
|
7389c85979127a48e0ce9b2f11e611c212a95524
|
akhet/__init__.py
|
akhet/__init__.py
|
from akhet.static import add_static_route
def includeme(config):
"""Add certain useful methods to a Pyramid ``Configurator`` instance.
Currently this adds the ``.add_static_route()`` method. (See
``pyramid_sqla.static.add_static_route()``.)
"""
config.add_directive('add_static_route', add_static_route)
|
Add 'includeme' function. (Accidentally put in SQLAHelper.)
|
Add 'includeme' function. (Accidentally put in SQLAHelper.)
|
Python
|
mit
|
Pylons/akhet,hlwsmith/akhet,hlwsmith/akhet,hlwsmith/akhet,Pylons/akhet
|
Add 'includeme' function. (Accidentally put in SQLAHelper.)
|
from akhet.static import add_static_route
def includeme(config):
"""Add certain useful methods to a Pyramid ``Configurator`` instance.
Currently this adds the ``.add_static_route()`` method. (See
``pyramid_sqla.static.add_static_route()``.)
"""
config.add_directive('add_static_route', add_static_route)
|
<commit_before><commit_msg>Add 'includeme' function. (Accidentally put in SQLAHelper.)<commit_after>
|
from akhet.static import add_static_route
def includeme(config):
"""Add certain useful methods to a Pyramid ``Configurator`` instance.
Currently this adds the ``.add_static_route()`` method. (See
``pyramid_sqla.static.add_static_route()``.)
"""
config.add_directive('add_static_route', add_static_route)
|
Add 'includeme' function. (Accidentally put in SQLAHelper.)from akhet.static import add_static_route
def includeme(config):
"""Add certain useful methods to a Pyramid ``Configurator`` instance.
Currently this adds the ``.add_static_route()`` method. (See
``pyramid_sqla.static.add_static_route()``.)
"""
config.add_directive('add_static_route', add_static_route)
|
<commit_before><commit_msg>Add 'includeme' function. (Accidentally put in SQLAHelper.)<commit_after>from akhet.static import add_static_route
def includeme(config):
"""Add certain useful methods to a Pyramid ``Configurator`` instance.
Currently this adds the ``.add_static_route()`` method. (See
``pyramid_sqla.static.add_static_route()``.)
"""
config.add_directive('add_static_route', add_static_route)
|
|
a53cf07d2b6f246bae866210fef6ad9988bce885
|
scripts/ttfaddemptyot.py
|
scripts/ttfaddemptyot.py
|
#!/usr/bin/python
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('infont', help='Input font file')
parser.add_argument('outfont', help='Output font file')
parser.add_argument('-s','--script',default='DFLT', help='Script tag to generate [DFLT]')
parser.add_argument('-t','--type',default='both', help='Table to create: gpos, gsub, [both]')
args = parser.parse_args()
inf = ttLib.TTFont(args.infont)
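# Create an empty GSUB/GPOS table: one script record, no features, no lookups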
for tag in ('GSUB', 'GPOS'):
if tag.lower() == args.type or args.type == 'both':
table = ttLib.getTableClass(tag)()
t = getattr(otTables, tag, None)()
t.Version = 1.0
t.ScriptList = otTables.ScriptList()
t.ScriptList.ScriptRecord = []
t.FeatureList = otTables.FeatureList()
t.FeatureList.FeatureRecord = []
t.LookupList = otTables.LookupList()
t.LookupList.Lookup = []
srec = otTables.ScriptRecord()
srec.ScriptTag = args.script
srec.Script = otTables.Script()
srec.Script.DefaultLangSys = None
srec.Script.LangSysRecord = []
t.ScriptList.ScriptRecord.append(srec)
t.ScriptList.ScriptCount = 1
t.FeatureList.FeatureCount = 0
t.LookupList.LookupCount = 0
table.table = t
inf[tag] = table
inf.save(args.outfont)
|
Add a new fonttools based tool
|
Add a new fonttools based tool
|
Python
|
mit
|
moyogo/pysilfont,moyogo/pysilfont
|
Add a new fonttools based tool
|
#!/usr/bin/python
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('infont', help='Input font file')
parser.add_argument('outfont', help='Output font file')
parser.add_argument('-s','--script',default='DFLT', help='Script tag to generate [DFLT]')
parser.add_argument('-t','--type',default='both', help='Table to create: gpos, gsub, [both]')
args = parser.parse_args()
inf = ttLib.TTFont(args.infont)
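# Create an empty GSUB/GPOS table: one script record, no features, no lookups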
for tag in ('GSUB', 'GPOS'):
if tag.lower() == args.type or args.type == 'both':
table = ttLib.getTableClass(tag)()
t = getattr(otTables, tag, None)()
t.Version = 1.0
t.ScriptList = otTables.ScriptList()
t.ScriptList.ScriptRecord = []
t.FeatureList = otTables.FeatureList()
t.FeatureList.FeatureRecord = []
t.LookupList = otTables.LookupList()
t.LookupList.Lookup = []
srec = otTables.ScriptRecord()
srec.ScriptTag = args.script
srec.Script = otTables.Script()
srec.Script.DefaultLangSys = None
srec.Script.LangSysRecord = []
t.ScriptList.ScriptRecord.append(srec)
t.ScriptList.ScriptCount = 1
t.FeatureList.FeatureCount = 0
t.LookupList.LookupCount = 0
table.table = t
inf[tag] = table
inf.save(args.outfont)
|
<commit_before><commit_msg>Add a new fonttools based tool<commit_after>
|
#!/usr/bin/python
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('infont', help='Input font file')
parser.add_argument('outfont', help='Output font file')
parser.add_argument('-s','--script',default='DFLT', help='Script tag to generate [DFLT]')
parser.add_argument('-t','--type',default='both', help='Table to create: gpos, gsub, [both]')
args = parser.parse_args()
inf = ttLib.TTFont(args.infont)
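# Create an empty GSUB/GPOS table: one script record, no features, no lookups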
for tag in ('GSUB', 'GPOS'):
if tag.lower() == args.type or args.type == 'both':
table = ttLib.getTableClass(tag)()
t = getattr(otTables, tag, None)()
t.Version = 1.0
t.ScriptList = otTables.ScriptList()
t.ScriptList.ScriptRecord = []
t.FeatureList = otTables.FeatureList()
t.FeatureList.FeatureRecord = []
t.LookupList = otTables.LookupList()
t.LookupList.Lookup = []
srec = otTables.ScriptRecord()
srec.ScriptTag = args.script
srec.Script = otTables.Script()
srec.Script.DefaultLangSys = None
srec.Script.LangSysRecord = []
t.ScriptList.ScriptRecord.append(srec)
t.ScriptList.ScriptCount = 1
t.FeatureList.FeatureCount = 0
t.LookupList.LookupCount = 0
table.table = t
inf[tag] = table
inf.save(args.outfont)
|
Add a new fonttools based tool#!/usr/bin/python
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('infont', help='Input font file')
parser.add_argument('outfont', help='Output font file')
parser.add_argument('-s','--script',default='DFLT', help='Script tag to generate [DFLT]')
parser.add_argument('-t','--type',default='both', help='Table to create: gpos, gsub, [both]')
args = parser.parse_args()
inf = ttLib.TTFont(args.infont)
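# Create an empty GSUB/GPOS table: one script record, no features, no lookups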
for tag in ('GSUB', 'GPOS'):
if tag.lower() == args.type or args.type == 'both':
table = ttLib.getTableClass(tag)()
t = getattr(otTables, tag, None)()
t.Version = 1.0
t.ScriptList = otTables.ScriptList()
t.ScriptList.ScriptRecord = []
t.FeatureList = otTables.FeatureList()
t.FeatureList.FeatureRecord = []
t.LookupList = otTables.LookupList()
t.LookupList.Lookup = []
srec = otTables.ScriptRecord()
srec.ScriptTag = args.script
srec.Script = otTables.Script()
srec.Script.DefaultLangSys = None
srec.Script.LangSysRecord = []
t.ScriptList.ScriptRecord.append(srec)
t.ScriptList.ScriptCount = 1
t.FeatureList.FeatureCount = 0
t.LookupList.LookupCount = 0
table.table = t
inf[tag] = table
inf.save(args.outfont)
|
<commit_before><commit_msg>Add a new fonttools based tool<commit_after>#!/usr/bin/python
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('infont', help='Input font file')
parser.add_argument('outfont', help='Output font file')
parser.add_argument('-s','--script',default='DFLT', help='Script tag to generate [DFLT]')
parser.add_argument('-t','--type',default='both', help='Table to create: gpos, gsub, [both]')
args = parser.parse_args()
inf = ttLib.TTFont(args.infont)
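# Create an empty GSUB/GPOS table: one script record, no features, no lookups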
for tag in ('GSUB', 'GPOS'):
if tag.lower() == args.type or args.type == 'both':
table = ttLib.getTableClass(tag)()
t = getattr(otTables, tag, None)()
t.Version = 1.0
t.ScriptList = otTables.ScriptList()
t.ScriptList.ScriptRecord = []
t.FeatureList = otTables.FeatureList()
t.FeatureList.FeatureRecord = []
t.LookupList = otTables.LookupList()
t.LookupList.Lookup = []
srec = otTables.ScriptRecord()
srec.ScriptTag = args.script
srec.Script = otTables.Script()
srec.Script.DefaultLangSys = None
srec.Script.LangSysRecord = []
t.ScriptList.ScriptRecord.append(srec)
t.ScriptList.ScriptCount = 1
t.FeatureList.FeatureCount = 0
t.LookupList.LookupCount = 0
table.table = t
inf[tag] = table
inf.save(args.outfont)
|
|
8718254272de33d77de03536b5fe26ad781b47b5
|
astroid/tests/unittest_brain_numpy_core_fromnumeric.py
|
astroid/tests/unittest_brain_numpy_core_fromnumeric.py
|
# -*- encoding=utf-8 -*-
# Copyright (c) 2017-2018 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import unittest
import contextlib
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from astroid import builder
class SubTestWrapper(unittest.TestCase):
"""
A class supporting all unittest versions whether or not subTest is available
"""
def subTest(self, msg=None, **params):
try:
# For python versions above 3.5 this should be ok
return super(SubTestWrapper, self).subTest(msg, **params)
except AttributeError:
# For python versions below 3.5
return subTestMock(msg)
@contextlib.contextmanager
def subTestMock(msg=None):
"""
A mock for subTest which does nothing
"""
yield msg
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class BrainNumpyCoreFromNumericTest(SubTestWrapper):
"""
Test the numpy core fromnumeric brain module
"""
numpy_functions = (
('sum', "[1, 2]"),
)
def _inferred_numpy_func_call(self, func_name, *func_args):
node = builder.extract_node(
"""
import numpy as np
func = np.{:s}
func({:s})
""".format(
func_name, ",".join(func_args)
)
)
return node.infer()
def test_numpy_function_calls_inferred_as_ndarray(self):
"""
Test that calls to numpy functions are inferred as numpy.ndarray
"""
licit_array_types = ('.ndarray',)
for func_ in self.numpy_functions:
with self.subTest(typ=func_):
inferred_values = list(self._inferred_numpy_func_call(*func_))
self.assertTrue(len(inferred_values) == 1,
msg="Too much inferred value for {:s}".format(func_[0]))
self.assertTrue(inferred_values[-1].pytype() in licit_array_types,
msg="Illicit type for {:s} ({})".format(func_[0], inferred_values[-1].pytype()))
if __name__ == "__main__":
unittest.main()
|
Add a unittest dedicated to the numpy_core_fromnumeric brain
|
Add a unittest dedicated to the numpy_core_fromnumeric brain
|
Python
|
lgpl-2.1
|
PyCQA/astroid
|
Add a unittest dedicated to the numpy_core_fromnumeric brain
|
# -*- encoding=utf-8 -*-
# Copyright (c) 2017-2018 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import unittest
import contextlib
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from astroid import builder
class SubTestWrapper(unittest.TestCase):
"""
A class supporting all unittest versions whether or not subTest is available
"""
def subTest(self, msg=None, **params):
try:
# For python versions above 3.5 this should be ok
return super(SubTestWrapper, self).subTest(msg, **params)
except AttributeError:
# For python versions below 3.5
return subTestMock(msg)
@contextlib.contextmanager
def subTestMock(msg=None):
"""
A mock for subTest which does nothing
"""
yield msg
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class BrainNumpyCoreFromNumericTest(SubTestWrapper):
"""
Test the numpy core fromnumeric brain module
"""
numpy_functions = (
('sum', "[1, 2]"),
)
def _inferred_numpy_func_call(self, func_name, *func_args):
node = builder.extract_node(
"""
import numpy as np
func = np.{:s}
func({:s})
""".format(
func_name, ",".join(func_args)
)
)
return node.infer()
def test_numpy_function_calls_inferred_as_ndarray(self):
"""
Test that calls to numpy functions are inferred as numpy.ndarray
"""
licit_array_types = ('.ndarray',)
for func_ in self.numpy_functions:
with self.subTest(typ=func_):
inferred_values = list(self._inferred_numpy_func_call(*func_))
self.assertTrue(len(inferred_values) == 1,
msg="Too much inferred value for {:s}".format(func_[0]))
self.assertTrue(inferred_values[-1].pytype() in licit_array_types,
msg="Illicit type for {:s} ({})".format(func_[0], inferred_values[-1].pytype()))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a unittest dedicated to the numpy_core_fromnumeric brain<commit_after>
|
# -*- encoding=utf-8 -*-
# Copyright (c) 2017-2018 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import unittest
import contextlib
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from astroid import builder
class SubTestWrapper(unittest.TestCase):
"""
A class supporting all unittest versions whether or not subTest is available
"""
def subTest(self, msg=None, **params):
try:
# For python versions above 3.5 this should be ok
return super(SubTestWrapper, self).subTest(msg, **params)
except AttributeError:
# For python versions below 3.5
return subTestMock(msg)
@contextlib.contextmanager
def subTestMock(msg=None):
"""
A mock for subTest which does nothing
"""
yield msg
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class BrainNumpyCoreFromNumericTest(SubTestWrapper):
"""
Test the numpy core fromnumeric brain module
"""
numpy_functions = (
('sum', "[1, 2]"),
)
def _inferred_numpy_func_call(self, func_name, *func_args):
node = builder.extract_node(
"""
import numpy as np
func = np.{:s}
func({:s})
""".format(
func_name, ",".join(func_args)
)
)
return node.infer()
def test_numpy_function_calls_inferred_as_ndarray(self):
"""
Test that calls to numpy functions are inferred as numpy.ndarray
"""
licit_array_types = ('.ndarray',)
for func_ in self.numpy_functions:
with self.subTest(typ=func_):
inferred_values = list(self._inferred_numpy_func_call(*func_))
self.assertTrue(len(inferred_values) == 1,
msg="Too much inferred value for {:s}".format(func_[0]))
self.assertTrue(inferred_values[-1].pytype() in licit_array_types,
msg="Illicit type for {:s} ({})".format(func_[0], inferred_values[-1].pytype()))
if __name__ == "__main__":
unittest.main()
|
Add a unittest dedicated to the numpy_core_fromnumeric brain# -*- encoding=utf-8 -*-
# Copyright (c) 2017-2018 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import unittest
import contextlib
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from astroid import builder
class SubTestWrapper(unittest.TestCase):
"""
A class supporting all unittest versions whether or not subTest is available
"""
def subTest(self, msg=None, **params):
try:
# For python versions above 3.5 this should be ok
return super(SubTestWrapper, self).subTest(msg, **params)
except AttributeError:
# For python versions below 3.5
return subTestMock(msg)
@contextlib.contextmanager
def subTestMock(msg=None):
"""
A mock for subTest which does nothing
"""
yield msg
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class BrainNumpyCoreFromNumericTest(SubTestWrapper):
"""
Test the numpy core fromnumeric brain module
"""
numpy_functions = (
('sum', "[1, 2]"),
)
def _inferred_numpy_func_call(self, func_name, *func_args):
node = builder.extract_node(
"""
import numpy as np
func = np.{:s}
func({:s})
""".format(
func_name, ",".join(func_args)
)
)
return node.infer()
def test_numpy_function_calls_inferred_as_ndarray(self):
"""
Test that calls to numpy functions are inferred as numpy.ndarray
"""
licit_array_types = ('.ndarray',)
for func_ in self.numpy_functions:
with self.subTest(typ=func_):
inferred_values = list(self._inferred_numpy_func_call(*func_))
self.assertTrue(len(inferred_values) == 1,
msg="Too much inferred value for {:s}".format(func_[0]))
self.assertTrue(inferred_values[-1].pytype() in licit_array_types,
msg="Illicit type for {:s} ({})".format(func_[0], inferred_values[-1].pytype()))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a unittest dedicated to the numpy_core_fromnumeric brain<commit_after># -*- encoding=utf-8 -*-
# Copyright (c) 2017-2018 hippo91 <guillaume.peillex@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import unittest
import contextlib
try:
import numpy # pylint: disable=unused-import
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
from astroid import builder
class SubTestWrapper(unittest.TestCase):
"""
A class supporting all unittest versions whether or not subTest is available
"""
def subTest(self, msg=None, **params):
try:
# For python versions above 3.5 this should be ok
return super(SubTestWrapper, self).subTest(msg, **params)
except AttributeError:
# For python versions below 3.5
return subTestMock(msg)
@contextlib.contextmanager
def subTestMock(msg=None):
"""
A mock for subTest which does nothing
"""
yield msg
@unittest.skipUnless(HAS_NUMPY, "This test requires the numpy library.")
class BrainNumpyCoreFromNumericTest(SubTestWrapper):
"""
Test the numpy core fromnumeric brain module
"""
numpy_functions = (
('sum', "[1, 2]"),
)
def _inferred_numpy_func_call(self, func_name, *func_args):
node = builder.extract_node(
"""
import numpy as np
func = np.{:s}
func({:s})
""".format(
func_name, ",".join(func_args)
)
)
return node.infer()
def test_numpy_function_calls_inferred_as_ndarray(self):
"""
Test that calls to numpy functions are inferred as numpy.ndarray
"""
licit_array_types = ('.ndarray',)
for func_ in self.numpy_functions:
with self.subTest(typ=func_):
inferred_values = list(self._inferred_numpy_func_call(*func_))
self.assertTrue(len(inferred_values) == 1,
msg="Too much inferred value for {:s}".format(func_[0]))
self.assertTrue(inferred_values[-1].pytype() in licit_array_types,
msg="Illicit type for {:s} ({})".format(func_[0], inferred_values[-1].pytype()))
if __name__ == "__main__":
unittest.main()
|
|
33bc340a84b597ae649c1c394bda3978cae1f789
|
tests/test__chrooting.py
|
tests/test__chrooting.py
|
import os
import platform
import tempfile
from .test_utils import EnvironmentTestCase
_EXPECTED_FILE_IN_MOUNT_NAME = "{mount_name}_file"
class ChrootingTestCase(EnvironmentTestCase):
def setUp(self):
super(ChrootingTestCase, self).setUp()
if os.getuid() != 0:
self.skipTest("Not root")
if platform.system() != "Linux":
self.skipTest("Not linux")
self.environment.load_configuration_file(os.path.join(os.path.dirname(__file__), "..", "example_config.py"))
def test__chrooting(self):
self.assertChrootFileExists("/dwight_base_image_file")
def assertChrootFileExists(self, path):
p = self.environment.run_command_in_chroot("test -e {}".format(path))
self.assertEquals(p.returncode, 0, "File {0!r} does not exist".format(path))
|
Add chrooting test (requires vagrant environment)
|
Add chrooting test (requires vagrant environment)
|
Python
|
bsd-3-clause
|
vmalloc/dwight,vmalloc/dwight,vmalloc/dwight
|
Add chrooting test (requires vagrant environment)
|
import os
import platform
import tempfile
from .test_utils import EnvironmentTestCase
_EXPECTED_FILE_IN_MOUNT_NAME = "{mount_name}_file"
class ChrootingTestCase(EnvironmentTestCase):
def setUp(self):
super(ChrootingTestCase, self).setUp()
if os.getuid() != 0:
self.skipTest("Not root")
if platform.system() != "Linux":
self.skipTest("Not linux")
self.environment.load_configuration_file(os.path.join(os.path.dirname(__file__), "..", "example_config.py"))
def test__chrooting(self):
self.assertChrootFileExists("/dwight_base_image_file")
def assertChrootFileExists(self, path):
p = self.environment.run_command_in_chroot("test -e {}".format(path))
self.assertEquals(p.returncode, 0, "File {0!r} does not exist".format(path))
|
<commit_before><commit_msg>Add chrooting test (requires vagrant environment)<commit_after>
|
import os
import platform
import tempfile
from .test_utils import EnvironmentTestCase
_EXPECTED_FILE_IN_MOUNT_NAME = "{mount_name}_file"
class ChrootingTestCase(EnvironmentTestCase):
def setUp(self):
super(ChrootingTestCase, self).setUp()
if os.getuid() != 0:
self.skipTest("Not root")
if platform.system() != "Linux":
self.skipTest("Not linux")
self.environment.load_configuration_file(os.path.join(os.path.dirname(__file__), "..", "example_config.py"))
def test__chrooting(self):
self.assertChrootFileExists("/dwight_base_image_file")
def assertChrootFileExists(self, path):
p = self.environment.run_command_in_chroot("test -e {}".format(path))
self.assertEquals(p.returncode, 0, "File {0!r} does not exist".format(path))
|
Add chrooting test (requires vagrant environment)import os
import platform
import tempfile
from .test_utils import EnvironmentTestCase
_EXPECTED_FILE_IN_MOUNT_NAME = "{mount_name}_file"
class ChrootingTestCase(EnvironmentTestCase):
def setUp(self):
super(ChrootingTestCase, self).setUp()
if os.getuid() != 0:
self.skipTest("Not root")
if platform.system() != "Linux":
self.skipTest("Not linux")
self.environment.load_configuration_file(os.path.join(os.path.dirname(__file__), "..", "example_config.py"))
def test__chrooting(self):
self.assertChrootFileExists("/dwight_base_image_file")
def assertChrootFileExists(self, path):
p = self.environment.run_command_in_chroot("test -e {}".format(path))
self.assertEquals(p.returncode, 0, "File {0!r} does not exist".format(path))
|
<commit_before><commit_msg>Add chrooting test (requires vagrant environment)<commit_after>import os
import platform
import tempfile
from .test_utils import EnvironmentTestCase
_EXPECTED_FILE_IN_MOUNT_NAME = "{mount_name}_file"
class ChrootingTestCase(EnvironmentTestCase):
def setUp(self):
super(ChrootingTestCase, self).setUp()
if os.getuid() != 0:
self.skipTest("Not root")
if platform.system() != "Linux":
self.skipTest("Not linux")
self.environment.load_configuration_file(os.path.join(os.path.dirname(__file__), "..", "example_config.py"))
def test__chrooting(self):
self.assertChrootFileExists("/dwight_base_image_file")
def assertChrootFileExists(self, path):
p = self.environment.run_command_in_chroot("test -e {}".format(path))
self.assertEquals(p.returncode, 0, "File {0!r} does not exist".format(path))
|
|
14148af45fa9b6a0377df866e202edab561df12d
|
tests/api/tourney/match/comments/test_view_for_match_as_json.py
|
tests/api/tourney/match/comments/test_view_for_match_as_json.py
|
"""
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import match_comment_service, match_service
def test_view_for_match_as_json(api_client, api_client_authz_header, match, comment):
url = f'/api/tourney/matches/{match.id}/comments.json'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.creator.id),
'screen_name': comment.creator.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body': 'Denn man tau.',
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
},
],
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return match_comment_service.create_comment(match.id, user.id, 'Denn man tau.')
|
Test tourney match comments retrieval as JSON
|
Test tourney match comments retrieval as JSON
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps
|
Test tourney match comments retrieval as JSON
|
"""
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import match_comment_service, match_service
def test_view_for_match_as_json(api_client, api_client_authz_header, match, comment):
url = f'/api/tourney/matches/{match.id}/comments.json'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.creator.id),
'screen_name': comment.creator.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body': 'Denn man tau.',
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
},
],
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return match_comment_service.create_comment(match.id, user.id, 'Denn man tau.')
|
<commit_before><commit_msg>Test tourney match comments retrieval as JSON<commit_after>
|
"""
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import match_comment_service, match_service
def test_view_for_match_as_json(api_client, api_client_authz_header, match, comment):
url = f'/api/tourney/matches/{match.id}/comments.json'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.creator.id),
'screen_name': comment.creator.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body': 'Denn man tau.',
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
},
],
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return match_comment_service.create_comment(match.id, user.id, 'Denn man tau.')
|
Test tourney match comments retrieval as JSON"""
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import match_comment_service, match_service
def test_view_for_match_as_json(api_client, api_client_authz_header, match, comment):
url = f'/api/tourney/matches/{match.id}/comments.json'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.creator.id),
'screen_name': comment.creator.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body': 'Denn man tau.',
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
},
],
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return match_comment_service.create_comment(match.id, user.id, 'Denn man tau.')
|
<commit_before><commit_msg>Test tourney match comments retrieval as JSON<commit_after>"""
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
from byceps.services.tourney import match_comment_service, match_service
def test_view_for_match_as_json(api_client, api_client_authz_header, match, comment):
url = f'/api/tourney/matches/{match.id}/comments.json'
headers = [api_client_authz_header]
response = api_client.get(url, headers=headers)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert response.get_json() == {
'comments': [
{
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': {
'user_id': str(comment.creator.id),
'screen_name': comment.creator.screen_name,
'suspended': False,
'deleted': False,
'avatar_url': None,
'is_orga': False,
},
'body': 'Denn man tau.',
'hidden': False,
'hidden_at': None,
'hidden_by_id': None,
},
],
}
# helpers
@pytest.fixture
def match(app):
return match_service.create_match()
@pytest.fixture
def comment(app, match, user):
return match_comment_service.create_comment(match.id, user.id, 'Denn man tau.')
|
|
41b1d7e8d4f719b979cb60db9a4e840cb69ec6d0
|
scripts/MammalSuperTree.py
|
scripts/MammalSuperTree.py
|
#retriever
from retriever.lib.templates import DownloadOnlyTemplate
SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
shortname='mammsupertree',
ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
|
Add a script to download the mammal super tree
|
Add a script to download the mammal super tree
|
Python
|
mit
|
davharris/retriever,henrykironde/deletedret,goelakash/retriever,henrykironde/deletedret,bendmorris/retriever,davharris/retriever,bendmorris/retriever,davharris/retriever,bendmorris/retriever,embaldridge/retriever,embaldridge/retriever,embaldridge/retriever,goelakash/retriever
|
Add a script to download the mammal super tree
|
#retriever
from retriever.lib.templates import DownloadOnlyTemplate
SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
shortname='mammsupertree',
ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
|
<commit_before><commit_msg>Add a script to download the mammal super tree<commit_after>
|
#retriever
from retriever.lib.templates import DownloadOnlyTemplate
SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
shortname='mammsupertree',
ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
|
Add a script to download the mammal super tree#retriever
from retriever.lib.templates import DownloadOnlyTemplate
SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
shortname='mammsupertree',
ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
|
<commit_before><commit_msg>Add a script to download the mammal super tree<commit_after>#retriever
from retriever.lib.templates import DownloadOnlyTemplate
SCRIPT = DownloadOnlyTemplate(name="Mammal Super Tree",
shortname='mammsupertree',
ref='http://doi.org/10.1111/j.1461-0248.2009.01307.x',
description="Mammal Super Tree from Fritz, S.A., O.R.P Bininda-Emonds, and A. Purvis. 2009. Geographical variation in predictors of mammalian extinction risk: big is bad, but only in the tropics. Ecology Letters 12:538-549",
urls ={'mammal_super_tree_fritz2009.tre': 'http://onlinelibrary.wiley.com/store/10.1111/j.1461-0248.2009.01307.x/asset/supinfo/ELE_1307_sm_SA1.tre?v=1&s=366b28651a9b5d1a3148ef9a8620f8aa31a7df44'})
|
|
c3cb492aed8140e3a58fb09212d236689a677437
|
scripts/draw_square.ext.py
|
scripts/draw_square.ext.py
|
from nxt.motor import PORT_A, PORT_B, PORT_C
from .helpers.robot import Robot, SERVO_NICE
def main():
robot = Robot(debug=True, verbose=True)
# Motors
robot.init_synchronized_motors(PORT_A, PORT_C)
robot.init_servo(PORT_B)
robot.set_servo(SERVO_NICE)
sides = 4 # Spoiler alert! It's a square.
side_length = 10 # In centimeters
angle = 90
for _ in range(sides):
robot.move_forward(dist=side_length)
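# Turn through the exterior angle (180 - interior angle) at each corner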
robot.turn_right(power=50, degrees=(180 - angle))
print('[DONE] Won\'t do anything else.')
if __name__ == '__main__':
main()
|
Add experimental version of 'Draw a square' :fire:
|
Add experimental version of 'Draw a square' :fire:
|
Python
|
mit
|
richin13/nxt-scripts
|
Add experimental version of 'Draw a square' :fire:
|
from nxt.motor import PORT_A, PORT_B, PORT_C
from .helpers.robot import Robot, SERVO_NICE
def main():
robot = Robot(debug=True, verbose=True)
# Motors
robot.init_synchronized_motors(PORT_A, PORT_C)
robot.init_servo(PORT_B)
robot.set_servo(SERVO_NICE)
sides = 4 # Spoiler alert! It's a square.
side_length = 10 # In centimeters
angle = 90
for _ in range(sides):
robot.move_forward(dist=side_length)
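# Turn through the exterior angle (180 - interior angle) at each corner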
robot.turn_right(power=50, degrees=(180 - angle))
print('[DONE] Won\'t do anything else.')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add experimental version of 'Draw a square' :fire:<commit_after>
|
from nxt.motor import PORT_A, PORT_B, PORT_C
from .helpers.robot import Robot, SERVO_NICE
def main():
robot = Robot(debug=True, verbose=True)
# Motors
robot.init_synchronized_motors(PORT_A, PORT_C)
robot.init_servo(PORT_B)
robot.set_servo(SERVO_NICE)
sides = 4 # Spoiler alert! It's a square.
side_length = 10 # In centimeters
angle = 90
for _ in range(sides):
robot.move_forward(dist=side_length)
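# Turn through the exterior angle (180 - interior angle) at each corner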
robot.turn_right(power=50, degrees=(180 - angle))
print('[DONE] Won\'t do anything else.')
if __name__ == '__main__':
main()
|
Add experimental version of 'Draw a square' :fire:from nxt.motor import PORT_A, PORT_B, PORT_C
from .helpers.robot import Robot, SERVO_NICE
def main():
robot = Robot(debug=True, verbose=True)
# Motors
robot.init_synchronized_motors(PORT_A, PORT_C)
robot.init_servo(PORT_B)
robot.set_servo(SERVO_NICE)
sides = 4 # Spoiler alert! It's a square.
side_length = 10 # In centimeters
angle = 90
for _ in range(sides):
robot.move_forward(dist=side_length)
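# Turn through the exterior angle (180 - interior angle) at each corner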
robot.turn_right(power=50, degrees=(180 - angle))
print('[DONE] Won\'t do anything else.')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add experimental version of 'Draw a square' :fire:<commit_after>from nxt.motor import PORT_A, PORT_B, PORT_C
from .helpers.robot import Robot, SERVO_NICE
def main():
robot = Robot(debug=True, verbose=True)
# Motors
robot.init_synchronized_motors(PORT_A, PORT_C)
robot.init_servo(PORT_B)
robot.set_servo(SERVO_NICE)
sides = 4 # Spoiler alert! It's a square.
side_length = 10 # In centimeters
angle = 90
for _ in range(sides):
robot.move_forward(dist=side_length)
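# Turn through the exterior angle (180 - interior angle) at each corner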
robot.turn_right(power=50, degrees=(180 - angle))
print('[DONE] Won\'t do anything else.')
if __name__ == '__main__':
main()
|
|
95a1c54cc728c11d693e19a70e0691c077251d2f
|
tests/quick_test.py
|
tests/quick_test.py
|
#!/usr/bin/env python3
import dirty_water
rxn = dirty_water.Reaction()
rxn.num_reactions = 10
rxn['Cas9'].std_volume = 1, 'μL'
rxn['Cas9'].std_stock_conc = 20, 'μM'
rxn['Cas9'].master_mix = True
rxn['Cas9'].conc = 4
rxn['buffer'].std_volume = 1, 'μL'
rxn['buffer'].std_stock_conc = '10x'
rxn['buffer'].master_mix = True
rxn['sgRNA'].std_volume = 1, 'μL'
rxn['sgRNA'].std_stock_conc = 32, 'μM'
rxn['trypsin'].std_volume = 1, 'μL'
rxn['trypsin'].std_stock_conc = '10x'
rxn['water'].std_volume = 6, 'μL'
rxn['water'].master_mix = True
print(rxn)
|
Add a very weak test of the Reaction API.
|
Add a very weak test of the Reaction API.
|
Python
|
mit
|
kalekundert/dirty_water
|
Add a very weak test of the Reaction API.
|
#!/usr/bin/env python3
import dirty_water
rxn = dirty_water.Reaction()
rxn.num_reactions = 10
rxn['Cas9'].std_volume = 1, 'μL'
rxn['Cas9'].std_stock_conc = 20, 'μM'
rxn['Cas9'].master_mix = True
rxn['Cas9'].conc = 4
rxn['buffer'].std_volume = 1, 'μL'
rxn['buffer'].std_stock_conc = '10x'
rxn['buffer'].master_mix = True
rxn['sgRNA'].std_volume = 1, 'μL'
rxn['sgRNA'].std_stock_conc = 32, 'μM'
rxn['trypsin'].std_volume = 1, 'μL'
rxn['trypsin'].std_stock_conc = '10x'
rxn['water'].std_volume = 6, 'μL'
rxn['water'].master_mix = True
print(rxn)
|
<commit_before><commit_msg>Add a very weak test of the Reaction API.<commit_after>
|
#!/usr/bin/env python3
import dirty_water
rxn = dirty_water.Reaction()
rxn.num_reactions = 10
rxn['Cas9'].std_volume = 1, 'μL'
rxn['Cas9'].std_stock_conc = 20, 'μM'
rxn['Cas9'].master_mix = True
rxn['Cas9'].conc = 4
rxn['buffer'].std_volume = 1, 'μL'
rxn['buffer'].std_stock_conc = '10x'
rxn['buffer'].master_mix = True
rxn['sgRNA'].std_volume = 1, 'μL'
rxn['sgRNA'].std_stock_conc = 32, 'μM'
rxn['trypsin'].std_volume = 1, 'μL'
rxn['trypsin'].std_stock_conc = '10x'
rxn['water'].std_volume = 6, 'μL'
rxn['water'].master_mix = True
print(rxn)
|
Add a very weak test of the Reaction API.#!/usr/bin/env python3
import dirty_water
rxn = dirty_water.Reaction()
rxn.num_reactions = 10
rxn['Cas9'].std_volume = 1, 'μL'
rxn['Cas9'].std_stock_conc = 20, 'μM'
rxn['Cas9'].master_mix = True
rxn['Cas9'].conc = 4
rxn['buffer'].std_volume = 1, 'μL'
rxn['buffer'].std_stock_conc = '10x'
rxn['buffer'].master_mix = True
rxn['sgRNA'].std_volume = 1, 'μL'
rxn['sgRNA'].std_stock_conc = 32, 'μM'
rxn['trypsin'].std_volume = 1, 'μL'
rxn['trypsin'].std_stock_conc = '10x'
rxn['water'].std_volume = 6, 'μL'
rxn['water'].master_mix = True
print(rxn)
|
<commit_before><commit_msg>Add a very weak test of the Reaction API.<commit_after>#!/usr/bin/env python3
import dirty_water
rxn = dirty_water.Reaction()
rxn.num_reactions = 10
rxn['Cas9'].std_volume = 1, 'μL'
rxn['Cas9'].std_stock_conc = 20, 'μM'
rxn['Cas9'].master_mix = True
rxn['Cas9'].conc = 4
rxn['buffer'].std_volume = 1, 'μL'
rxn['buffer'].std_stock_conc = '10x'
rxn['buffer'].master_mix = True
rxn['sgRNA'].std_volume = 1, 'μL'
rxn['sgRNA'].std_stock_conc = 32, 'μM'
rxn['trypsin'].std_volume = 1, 'μL'
rxn['trypsin'].std_stock_conc = '10x'
rxn['water'].std_volume = 6, 'μL'
rxn['water'].master_mix = True
print(rxn)
|
|
5d1b7b38b70dfa17bc7c468fb49e3e576c9accc0
|
tests/range_test.py
|
tests/range_test.py
|
"""Tests for the range class."""
from sympy import sympify
from drudge import Range
def test_range_has_basic_operations():
"""Test the basic operations on ranges."""
a_symb = sympify('a')
b_symb = sympify('b')
bound0 = Range('B', 'a', 'b')
bound1 = Range('B', a_symb, b_symb)
symb0 = Range('S')
symb1 = Range('S')
assert bound0 == bound1
assert hash(bound0) == hash(bound1)
assert symb0 == symb1
assert hash(symb0) == hash(symb1)
assert bound0 != symb0
assert hash(bound0) != hash(symb0)
assert bound0.label == 'B'
assert bound0.lower == a_symb
assert bound0.upper == b_symb
assert bound0.args == (bound1.label, bound1.lower, bound1.upper)
assert symb0.label == 'S'
assert symb0.lower is None
assert symb0.upper is None
assert len(symb0.args) == 1
assert symb0.args[0] == symb1.label
|
Add tests for the symbolic ranges
|
Add tests for the symbolic ranges
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add tests for the symbolic ranges
|
"""Tests for the range class."""
from sympy import sympify
from drudge import Range
def test_range_has_basic_operations():
"""Test the basic operations on ranges."""
a_symb = sympify('a')
b_symb = sympify('b')
bound0 = Range('B', 'a', 'b')
bound1 = Range('B', a_symb, b_symb)
symb0 = Range('S')
symb1 = Range('S')
assert bound0 == bound1
assert hash(bound0) == hash(bound1)
assert symb0 == symb1
assert hash(symb0) == hash(symb1)
assert bound0 != symb0
assert hash(bound0) != hash(symb0)
assert bound0.label == 'B'
assert bound0.lower == a_symb
assert bound0.upper == b_symb
assert bound0.args == (bound1.label, bound1.lower, bound1.upper)
assert symb0.label == 'S'
assert symb0.lower is None
assert symb0.upper is None
assert len(symb0.args) == 1
assert symb0.args[0] == symb1.label
|
<commit_before><commit_msg>Add tests for the symbolic ranges<commit_after>
|
"""Tests for the range class."""
from sympy import sympify
from drudge import Range
def test_range_has_basic_operations():
"""Test the basic operations on ranges."""
a_symb = sympify('a')
b_symb = sympify('b')
bound0 = Range('B', 'a', 'b')
bound1 = Range('B', a_symb, b_symb)
symb0 = Range('S')
symb1 = Range('S')
assert bound0 == bound1
assert hash(bound0) == hash(bound1)
assert symb0 == symb1
assert hash(symb0) == hash(symb1)
assert bound0 != symb0
assert hash(bound0) != hash(symb0)
assert bound0.label == 'B'
assert bound0.lower == a_symb
assert bound0.upper == b_symb
assert bound0.args == (bound1.label, bound1.lower, bound1.upper)
assert symb0.label == 'S'
assert symb0.lower is None
assert symb0.upper is None
assert len(symb0.args) == 1
assert symb0.args[0] == symb1.label
|
Add tests for the symbolic ranges"""Tests for the range class."""
from sympy import sympify
from drudge import Range
def test_range_has_basic_operations():
"""Test the basic operations on ranges."""
a_symb = sympify('a')
b_symb = sympify('b')
bound0 = Range('B', 'a', 'b')
bound1 = Range('B', a_symb, b_symb)
symb0 = Range('S')
symb1 = Range('S')
assert bound0 == bound1
assert hash(bound0) == hash(bound1)
assert symb0 == symb1
assert hash(symb0) == hash(symb1)
assert bound0 != symb0
assert hash(bound0) != hash(symb0)
assert bound0.label == 'B'
assert bound0.lower == a_symb
assert bound0.upper == b_symb
assert bound0.args == (bound1.label, bound1.lower, bound1.upper)
assert symb0.label == 'S'
assert symb0.lower is None
assert symb0.upper is None
assert len(symb0.args) == 1
assert symb0.args[0] == symb1.label
|
<commit_before><commit_msg>Add tests for the symbolic ranges<commit_after>"""Tests for the range class."""
from sympy import sympify
from drudge import Range
def test_range_has_basic_operations():
"""Test the basic operations on ranges."""
a_symb = sympify('a')
b_symb = sympify('b')
bound0 = Range('B', 'a', 'b')
bound1 = Range('B', a_symb, b_symb)
symb0 = Range('S')
symb1 = Range('S')
assert bound0 == bound1
assert hash(bound0) == hash(bound1)
assert symb0 == symb1
assert hash(symb0) == hash(symb1)
assert bound0 != symb0
assert hash(bound0) != hash(symb0)
assert bound0.label == 'B'
assert bound0.lower == a_symb
assert bound0.upper == b_symb
assert bound0.args == (bound1.label, bound1.lower, bound1.upper)
assert symb0.label == 'S'
assert symb0.lower is None
assert symb0.upper is None
assert len(symb0.args) == 1
assert symb0.args[0] == symb1.label
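The equality and hashing assertions above lean on sympy treating symbols structurally rather than by object identity; a quick standalone check of that behavior:
from sympy import Symbol, sympify
assert sympify('a') == Symbol('a')               # sympify('a') yields the symbol a
assert hash(sympify('a')) == hash(Symbol('a'))   # hashing is by structure, not identity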
|
|
32f3a42c04a1ae8729e08bf7ce67a1f01f7ac2b3
|
lib/receiving_widget.py
|
lib/receiving_widget.py
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from i18n import _
class ReceivingWidget(QTreeWidget):
def toggle_used(self):
if self.hide_used:
self.hide_used = False
self.setColumnHidden(2, False)
else:
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
def edit_label(self, item, column):
if column == 1 and item.isSelected():
self.editing = True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editItem(item, column)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editing = False
def update_label(self, item, column):
if self.editing:
return
else:
address = str(item.text(0))
label = unicode( item.text(1) )
self.owner.actuator.wallet.labels[address] = label
def copy_address(self):
address = self.currentItem().text(0)
qApp.clipboard().setText(address)
def update_list(self):
self.clear()
addresses = [addr for addr in self.owner.actuator.wallet.all_addresses() if not self.owner.actuator.wallet.is_change(addr)]
for address in addresses:
history = self.owner.actuator.wallet.history.get(address,[])
used = "No"
for tx_hash, tx_height in history:
tx = self.owner.actuator.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
if (self.hide_used and used == "No") or not self.hide_used:
label = self.owner.actuator.wallet.labels.get(address,'')
item = QTreeWidgetItem([address, label, used])
self.insertTopLevelItem(0, item)
def __init__(self, owner=None):
self.owner = owner
self.editing = False
QTreeWidget.__init__(self, owner)
self.setColumnCount(3)
self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
self.setIndentation(0)
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
|
Add receiving widget for lite gui
|
Add receiving widget for lite gui
|
Python
|
mit
|
molecular/electrum,cryptapus/electrum-uno,pknight007/electrum-vtc,cryptapus/electrum-myr,molecular/electrum,cryptapus/electrum-uno,dashpay/electrum-dash,lbryio/lbryum,dabura667/electrum,procrasti/electrum,digitalbitbox/electrum,wakiyamap/electrum-mona,cryptapus/electrum-myr,kyuupichan/electrum,neocogent/electrum,spesmilo/electrum,lbryio/lbryum,FairCoinTeam/electrum-fair,imrehg/electrum,aasiutin/electrum,vertcoin/electrum-vtc,romanz/electrum,procrasti/electrum,asfin/electrum,asfin/electrum,dashpay/electrum-dash,argentumproject/electrum-arg,fireduck64/electrum,procrasti/electrum,dashpay/electrum-dash,cryptapus/electrum,dabura667/electrum,cryptapus/electrum,pooler/electrum-ltc,dashpay/electrum-dash,pknight007/electrum-vtc,procrasti/electrum,vialectrum/vialectrum,argentumproject/electrum-arg,fujicoin/electrum-fjc,cryptapus/electrum-uno,dabura667/electrum,argentumproject/electrum-arg,cryptapus/electrum,fujicoin/electrum-fjc,imrehg/electrum,molecular/electrum,digitalbitbox/electrum,fireduck64/electrum,neocogent/electrum,neocogent/electrum,pooler/electrum-ltc,fyookball/electrum,cryptapus/electrum-myr,protonn/Electrum-Cash,digitalbitbox/electrum,fireduck64/electrum,aasiutin/electrum,protonn/Electrum-Cash,pknight007/electrum-vtc,FairCoinTeam/electrum-fair,FairCoinTeam/electrum-fair,spesmilo/electrum,dabura667/electrum,vialectrum/vialectrum,vertcoin/electrum-vtc,vertcoin/electrum-vtc,romanz/electrum,kyuupichan/electrum,romanz/electrum,molecular/electrum,kyuupichan/electrum,pooler/electrum-ltc,aasiutin/electrum,digitalbitbox/electrum,aasiutin/electrum,argentumproject/electrum-arg,vertcoin/electrum-vtc,wakiyamap/electrum-mona,fyookball/electrum,wakiyamap/electrum-mona,spesmilo/electrum,protonn/Electrum-Cash,fireduck64/electrum,asfin/electrum,vialectrum/vialectrum,spesmilo/electrum,wakiyamap/electrum-mona,cryptapus/electrum-uno,cryptapus/electrum-myr,pknight007/electrum-vtc,fyookball/electrum,pooler/electrum-ltc,FairCoinTeam/electrum-fair,fujicoin/electrum-fjc,imrehg/electrum,protonn/Electrum-Cash,imrehg/electrum
|
Add receiving widget for lite gui
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from i18n import _
class ReceivingWidget(QTreeWidget):
def toggle_used(self):
if self.hide_used:
self.hide_used = False
self.setColumnHidden(2, False)
else:
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
def edit_label(self, item, column):
if column == 1 and item.isSelected():
self.editing = True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editItem(item, column)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editing = False
def update_label(self, item, column):
if self.editing:
return
else:
address = str(item.text(0))
label = unicode( item.text(1) )
self.owner.actuator.wallet.labels[address] = label
def copy_address(self):
address = self.currentItem().text(0)
qApp.clipboard().setText(address)
def update_list(self):
self.clear()
addresses = [addr for addr in self.owner.actuator.wallet.all_addresses() if not self.owner.actuator.wallet.is_change(addr)]
for address in addresses:
history = self.owner.actuator.wallet.history.get(address,[])
used = "No"
for tx_hash, tx_height in history:
tx = self.owner.actuator.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
if (self.hide_used and used == "No") or not self.hide_used:
label = self.owner.actuator.wallet.labels.get(address,'')
item = QTreeWidgetItem([address, label, used])
self.insertTopLevelItem(0, item)
def __init__(self, owner=None):
self.owner = owner
self.editing = False
QTreeWidget.__init__(self, owner)
self.setColumnCount(3)
self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
self.setIndentation(0)
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
|
<commit_before><commit_msg>Add receiving widget for lite gui<commit_after>
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from i18n import _
class ReceivingWidget(QTreeWidget):
def toggle_used(self):
if self.hide_used:
self.hide_used = False
self.setColumnHidden(2, False)
else:
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
def edit_label(self, item, column):
if column == 1 and item.isSelected():
self.editing = True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editItem(item, column)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editing = False
def update_label(self, item, column):
if self.editing:
return
else:
address = str(item.text(0))
label = unicode( item.text(1) )
self.owner.actuator.wallet.labels[address] = label
def copy_address(self):
address = self.currentItem().text(0)
qApp.clipboard().setText(address)
def update_list(self):
self.clear()
addresses = [addr for addr in self.owner.actuator.wallet.all_addresses() if not self.owner.actuator.wallet.is_change(addr)]
for address in addresses:
history = self.owner.actuator.wallet.history.get(address,[])
used = "No"
for tx_hash, tx_height in history:
tx = self.owner.actuator.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
if (self.hide_used and used == "No") or not self.hide_used:
label = self.owner.actuator.wallet.labels.get(address,'')
item = QTreeWidgetItem([address, label, used])
self.insertTopLevelItem(0, item)
def __init__(self, owner=None):
self.owner = owner
self.editing = False
QTreeWidget.__init__(self, owner)
self.setColumnCount(3)
self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
self.setIndentation(0)
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
|
Add receiving widget for lite guifrom PyQt4.QtGui import *
from PyQt4.QtCore import *
from i18n import _
class ReceivingWidget(QTreeWidget):
def toggle_used(self):
if self.hide_used:
self.hide_used = False
self.setColumnHidden(2, False)
else:
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
def edit_label(self, item, column):
if column == 1 and item.isSelected():
self.editing = True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editItem(item, column)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editing = False
def update_label(self, item, column):
if self.editing:
return
else:
address = str(item.text(0))
label = unicode( item.text(1) )
self.owner.actuator.wallet.labels[address] = label
def copy_address(self):
address = self.currentItem().text(0)
qApp.clipboard().setText(address)
def update_list(self):
self.clear()
addresses = [addr for addr in self.owner.actuator.wallet.all_addresses() if not self.owner.actuator.wallet.is_change(addr)]
for address in addresses:
history = self.owner.actuator.wallet.history.get(address,[])
used = "No"
for tx_hash, tx_height in history:
tx = self.owner.actuator.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
if (self.hide_used and used == "No") or not self.hide_used:
label = self.owner.actuator.wallet.labels.get(address,'')
item = QTreeWidgetItem([address, label, used])
self.insertTopLevelItem(0, item)
def __init__(self, owner=None):
self.owner = owner
self.editing = False
QTreeWidget.__init__(self, owner)
self.setColumnCount(3)
self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
self.setIndentation(0)
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
|
<commit_before><commit_msg>Add receiving widget for lite gui<commit_after>from PyQt4.QtGui import *
from PyQt4.QtCore import *
from i18n import _
class ReceivingWidget(QTreeWidget):
def toggle_used(self):
if self.hide_used:
self.hide_used = False
self.setColumnHidden(2, False)
else:
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
def edit_label(self, item, column):
if column == 1 and item.isSelected():
self.editing = True
item.setFlags(Qt.ItemIsEditable|Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editItem(item, column)
item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
self.editing = False
def update_label(self, item, column):
if self.editing:
return
else:
address = str(item.text(0))
label = unicode( item.text(1) )
self.owner.actuator.wallet.labels[address] = label
def copy_address(self):
address = self.currentItem().text(0)
qApp.clipboard().setText(address)
def update_list(self):
self.clear()
addresses = [addr for addr in self.owner.actuator.wallet.all_addresses() if not self.owner.actuator.wallet.is_change(addr)]
for address in addresses:
history = self.owner.actuator.wallet.history.get(address,[])
used = "No"
for tx_hash, tx_height in history:
tx = self.owner.actuator.wallet.transactions.get(tx_hash)
if tx:
used = "Yes"
if (self.hide_used and used == "No") or not self.hide_used:
label = self.owner.actuator.wallet.labels.get(address,'')
item = QTreeWidgetItem([address, label, used])
self.insertTopLevelItem(0, item)
def __init__(self, owner=None):
self.owner = owner
self.editing = False
QTreeWidget.__init__(self, owner)
self.setColumnCount(3)
self.setHeaderLabels([_("Address"), _("Label"), _("Used")])
self.setIndentation(0)
self.hide_used = True
self.setColumnHidden(2, True)
self.update_list()
|
|
36efa6e2615bdd600d80cc87ada27984c9806135
|
articles/search_indexes.py
|
articles/search_indexes.py
|
from haystack.indexes import *
from haystack import site
from models import Article
class ArticleIndex(SearchIndex):
name = CharField(model_attr='title')
text = CharField(document=True, use_template=True)
def get_queryset(self):
"""Used when the entire index for model is updated."""
return Article.objects.active()
site.register(Article, ArticleIndex)
|
Add search index file - currently search across Articles, extend to search across Categories?
|
Add search index file - currently search across Articles, extend to search across Categories?
|
Python
|
bsd-2-clause
|
incuna/feincms-articles,michaelkuty/feincms-articles,incuna/feincms-articles,michaelkuty/feincms-articles
|
Add search index file - currently search across Articles, extend to search across Categories?
|
from haystack.indexes import *
from haystack import site
from models import Article
class ArticleIndex(SearchIndex):
name = CharField(model_attr='title')
text = CharField(document=True, use_template=True)
def get_queryset(self):
"""Used when the entire index for model is updated."""
return Article.objects.active()
site.register(Article, ArticleIndex)
|
<commit_before><commit_msg>Add search index file - currently search across Articles, extend to search across Categories?<commit_after>
|
from haystack.indexes import *
from haystack import site
from models import Article
class ArticleIndex(SearchIndex):
name = CharField(model_attr='title')
text = CharField(document=True, use_template=True)
def get_queryset(self):
"""Used when the entire index for model is updated."""
return Article.objects.active()
site.register(Article, ArticleIndex)
|
Add search index file - currently search across Articles, extend to search across Categories?from haystack.indexes import *
from haystack import site
from models import Article
class ArticleIndex(SearchIndex):
name = CharField(model_attr='title')
text = CharField(document=True, use_template=True)
def get_queryset(self):
"""Used when the entire index for model is updated."""
return Article.objects.active()
site.register(Article, ArticleIndex)
|
<commit_before><commit_msg>Add search index file - currently search across Articles, extend to search across Categories?<commit_after>from haystack.indexes import *
from haystack import site
from models import Article
class ArticleIndex(SearchIndex):
name = CharField(model_attr='title')
text = CharField(document=True, use_template=True)
def get_queryset(self):
"""Used when the entire index for model is updated."""
return Article.objects.active()
site.register(Article, ArticleIndex)
|
|
b2bc2f08d69ea42f9cecf6f994b1ed3c8e9f8022
|
py/minimum-number-of-arrows-to-burst-balloons.py
|
py/minimum-number-of-arrows-to-burst-balloons.py
|
from operator import itemgetter
class Solution(object):
def findMinArrowShots(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
points.sort(key=itemgetter(1))
end = None
ans = 0
for p in points:
if end is None or end < p[0]:
end = p[1]
ans += 1
return ans
|
Add py solution for 452. Minimum Number of Arrows to Burst Balloons
|
Add py solution for 452. Minimum Number of Arrows to Burst Balloons
452. Minimum Number of Arrows to Burst Balloons: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 452. Minimum Number of Arrows to Burst Balloons
452. Minimum Number of Arrows to Burst Balloons: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/
|
from operator import itemgetter
class Solution(object):
def findMinArrowShots(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
points.sort(key=itemgetter(1))
end = None
ans = 0
for p in points:
if end is None or end < p[0]:
end = p[1]
ans += 1
return ans
|
<commit_before><commit_msg>Add py solution for 452. Minimum Number of Arrows to Burst Balloons
452. Minimum Number of Arrows to Burst Balloons: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/<commit_after>
|
from operator import itemgetter
class Solution(object):
def findMinArrowShots(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
points.sort(key=itemgetter(1))
end = None
ans = 0
for p in points:
if end is None or end < p[0]:
end = p[1]
ans += 1
return ans
|
Add py solution for 452. Minimum Number of Arrows to Burst Balloons
452. Minimum Number of Arrows to Burst Balloons: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/from operator import itemgetter
class Solution(object):
def findMinArrowShots(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
points.sort(key=itemgetter(1))
end = None
ans = 0
for p in points:
if end is None or end < p[0]:
end = p[1]
ans += 1
return ans
|
<commit_before><commit_msg>Add py solution for 452. Minimum Number of Arrows to Burst Balloons
452. Minimum Number of Arrows to Burst Balloons: https://leetcode.com/problems/minimum-number-of-arrows-to-burst-balloons/<commit_after>from operator import itemgetter
class Solution(object):
def findMinArrowShots(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
points.sort(key=itemgetter(1))
end = None
ans = 0
for p in points:
if end is None or end < p[0]:
end = p[1]
ans += 1
return ans
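A quick sanity check of the greedy above on LeetCode's sample input, assuming the Solution class is importable:
sol = Solution()
print(sol.findMinArrowShots([[10, 16], [2, 8], [1, 6], [7, 12]]))  # expected: 2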
|
|
1360b8c4f6422dc8149ba40bb9a0717e796049e0
|
tests/test_posix.py
|
tests/test_posix.py
|
import sys
import nose.tools
from simuvex import SimState
def test_file_create():
# Create a state first
state = SimState(arch="AMD64", mode='symbolic')
# Create a file
fd = state.posix.open("test", "wb")
nose.tools.assert_equal(fd, 3)
def test_file_read():
state = SimState(arch="AMD64", mode='symbolic')
content = state.se.BVV(0xbadf00d, 32)
content_size = content.size() / 8
fd = state.posix.open("test", "wb")
state.posix.write(fd, content, content_size)
state.posix.seek(fd, 0, 0)
state.posix.read(fd, 0xc0000000, content_size)
data = state.memory.load(0xc0000000, content_size)
nose.tools.assert_true(state.se.is_true(data == content))
def test_file_seek():
# TODO: Make this test more complete
SEEK_SET = 0
SEEK_END = 1
SEEK_CUR = 2
state = SimState(arch="AMD64", mode='symbolic')
# Normal seeking
fd = state.posix.open("test", "wb")
r = state.posix.seek(fd, 0, SEEK_SET)
nose.tools.assert_equal(r, 0)
state.posix.close(fd)
# TODO: test case: seek cannot go beyond the file size or current file pos
# TODO: test case: seek should not work for stdin/stdout/stderr
# Seek from the end
fd = state.posix.open("test", "wb")
state.posix.files[fd].size = 20
state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_true(state.se.is_true(state.posix.files[fd].pos == 19))
state.posix.close(fd)
# cannot seek from a file whose size is unknown
fd = state.posix.open("unknown_size", "wb")
r = state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_equal(r, -1)
state.posix.close(fd)
def main():
g = globals()
if len(sys.argv) > 1:
f = "test_" + sys.argv[1]
if f in g and hasattr(g[f], "__call__"):
g[f]()
else:
for f, func in g.iteritems():
if f.startswith("test_") and hasattr(func, "__call__"):
func()
if __name__ == "__main__":
main()
|
Add a very simple test case for posix file support.
|
Add a very simple test case for posix file support.
|
Python
|
bsd-2-clause
|
angr/angr,f-prettyland/angr,axt/angr,schieb/angr,schieb/angr,chubbymaggie/angr,iamahuman/angr,tyb0807/angr,axt/angr,chubbymaggie/simuvex,schieb/angr,angr/angr,chubbymaggie/simuvex,tyb0807/angr,angr/angr,chubbymaggie/simuvex,f-prettyland/angr,f-prettyland/angr,axt/angr,angr/simuvex,chubbymaggie/angr,iamahuman/angr,iamahuman/angr,chubbymaggie/angr,tyb0807/angr
|
Add a very simple test case for posix file support.
|
import sys
import nose.tools
from simuvex import SimState
def test_file_create():
# Create a state first
state = SimState(arch="AMD64", mode='symbolic')
# Create a file
fd = state.posix.open("test", "wb")
nose.tools.assert_equal(fd, 3)
def test_file_read():
state = SimState(arch="AMD64", mode='symbolic')
content = state.se.BVV(0xbadf00d, 32)
content_size = content.size() / 8
fd = state.posix.open("test", "wb")
state.posix.write(fd, content, content_size)
state.posix.seek(fd, 0, 0)
state.posix.read(fd, 0xc0000000, content_size)
data = state.memory.load(0xc0000000, content_size)
nose.tools.assert_true(state.se.is_true(data == content))
def test_file_seek():
# TODO: Make this test more complete
SEEK_SET = 0
SEEK_END = 1
SEEK_CUR = 2
state = SimState(arch="AMD64", mode='symbolic')
# Normal seeking
fd = state.posix.open("test", "wb")
r = state.posix.seek(fd, 0, SEEK_SET)
nose.tools.assert_equal(r, 0)
state.posix.close(fd)
# TODO: test case: seek cannot go beyond the file size or current file pos
# TODO: test case: seek should not work for stdin/stdout/stderr
# Seek from the end
fd = state.posix.open("test", "wb")
state.posix.files[fd].size = 20
state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_true(state.se.is_true(state.posix.files[fd].pos == 19))
state.posix.close(fd)
# cannot seek from a file whose size is unknown
fd = state.posix.open("unknown_size", "wb")
r = state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_equal(r, -1)
state.posix.close(fd)
def main():
g = globals()
if len(sys.argv) > 1:
f = "test_" + sys.argv[1]
if f in g and hasattr(g[f], "__call__"):
g[f]()
else:
for f, func in g.iteritems():
if f.startswith("test_") and hasattr(func, "__call__"):
func()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a very simple test case for posix file support.<commit_after>
|
import sys
import nose.tools
from simuvex import SimState
def test_file_create():
# Create a state first
state = SimState(arch="AMD64", mode='symbolic')
# Create a file
fd = state.posix.open("test", "wb")
nose.tools.assert_equal(fd, 3)
def test_file_read():
state = SimState(arch="AMD64", mode='symbolic')
content = state.se.BVV(0xbadf00d, 32)
content_size = content.size() / 8
fd = state.posix.open("test", "wb")
state.posix.write(fd, content, content_size)
state.posix.seek(fd, 0, 0)
state.posix.read(fd, 0xc0000000, content_size)
data = state.memory.load(0xc0000000, content_size)
nose.tools.assert_true(state.se.is_true(data == content))
def test_file_seek():
# TODO: Make this test more complete
SEEK_SET = 0
SEEK_END = 1
SEEK_CUR = 2
state = SimState(arch="AMD64", mode='symbolic')
# Normal seeking
fd = state.posix.open("test", "wb")
r = state.posix.seek(fd, 0, SEEK_SET)
nose.tools.assert_equal(r, 0)
state.posix.close(fd)
# TODO: test case: seek cannot go beyond the file size or current file pos
# TODO: test case: seek should not work for stdin/stdout/stderr
# Seek from the end
fd = state.posix.open("test", "wb")
state.posix.files[fd].size = 20
state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_true(state.se.is_true(state.posix.files[fd].pos == 19))
state.posix.close(fd)
# cannot seek from a file whose size is unknown
fd = state.posix.open("unknown_size", "wb")
r = state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_equal(r, -1)
state.posix.close(fd)
def main():
g = globals()
if len(sys.argv) > 1:
f = "test_" + sys.argv[1]
if f in g and hasattr(g[f], "__call__"):
g[f]()
else:
for f, func in g.iteritems():
if f.startswith("test_") and hasattr(func, "__call__"):
func()
if __name__ == "__main__":
main()
|
Add a very simple test case for posix file support.
import sys
import nose.tools
from simuvex import SimState
def test_file_create():
# Create a state first
state = SimState(arch="AMD64", mode='symbolic')
# Create a file
fd = state.posix.open("test", "wb")
nose.tools.assert_equal(fd, 3)
def test_file_read():
state = SimState(arch="AMD64", mode='symbolic')
content = state.se.BVV(0xbadf00d, 32)
content_size = content.size() / 8
fd = state.posix.open("test", "wb")
state.posix.write(fd, content, content_size)
state.posix.seek(fd, 0, 0)
state.posix.read(fd, 0xc0000000, content_size)
data = state.memory.load(0xc0000000, content_size)
nose.tools.assert_true(state.se.is_true(data == content))
def test_file_seek():
# TODO: Make this test more complete
SEEK_SET = 0
SEEK_END = 1
SEEK_CUR = 2
state = SimState(arch="AMD64", mode='symbolic')
# Normal seeking
fd = state.posix.open("test", "wb")
r = state.posix.seek(fd, 0, SEEK_SET)
nose.tools.assert_equal(r, 0)
state.posix.close(fd)
# TODO: test case: seek cannot go beyond the file size or current file pos
# TODO: test case: seek should not work for stdin/stdout/stderr
# Seek from the end
fd = state.posix.open("test", "wb")
state.posix.files[fd].size = 20
state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_true(state.se.is_true(state.posix.files[fd].pos == 19))
state.posix.close(fd)
# cannot seek from a file whose size is unknown
fd = state.posix.open("unknown_size", "wb")
r = state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_equal(r, -1)
state.posix.close(fd)
def main():
g = globals()
if len(sys.argv) > 1:
f = "test_" + sys.argv[1]
if f in g and hasattr(g[f], "__call__"):
g[f]()
else:
for f, func in g.iteritems():
if f.startswith("test_") and hasattr(func, "__call__"):
func()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a very simple test case for posix file support.<commit_after>
import sys
import nose.tools
from simuvex import SimState
def test_file_create():
# Create a state first
state = SimState(arch="AMD64", mode='symbolic')
# Create a file
fd = state.posix.open("test", "wb")
nose.tools.assert_equal(fd, 3)
def test_file_read():
state = SimState(arch="AMD64", mode='symbolic')
content = state.se.BVV(0xbadf00d, 32)
content_size = content.size() / 8
fd = state.posix.open("test", "wb")
state.posix.write(fd, content, content_size)
state.posix.seek(fd, 0, 0)
state.posix.read(fd, 0xc0000000, content_size)
data = state.memory.load(0xc0000000, content_size)
nose.tools.assert_true(state.se.is_true(data == content))
def test_file_seek():
# TODO: Make this test more complete
SEEK_SET = 0
SEEK_END = 1
SEEK_CUR = 2
state = SimState(arch="AMD64", mode='symbolic')
# Normal seeking
fd = state.posix.open("test", "wb")
r = state.posix.seek(fd, 0, SEEK_SET)
nose.tools.assert_equal(r, 0)
state.posix.close(fd)
# TODO: test case: seek cannot go beyond the file size or current file pos
# TODO: test case: seek should not work for stdin/stdout/stderr
# Seek from the end
fd = state.posix.open("test", "wb")
state.posix.files[fd].size = 20
state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_true(state.se.is_true(state.posix.files[fd].pos == 19))
state.posix.close(fd)
# cannot seek from a file whose size is unknown
fd = state.posix.open("unknown_size", "wb")
r = state.posix.seek(fd, 0, SEEK_END)
nose.tools.assert_equal(r, -1)
state.posix.close(fd)
def main():
g = globals()
if len(sys.argv) > 1:
f = "test_" + sys.argv[1]
if f in g and hasattr(g[f], "__call__"):
g[f]()
else:
for f, func in g.iteritems():
if f.startswith("test_") and hasattr(func, "__call__"):
func()
if __name__ == "__main__":
main()
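Note that the local SEEK_* constants above (presumably matching simuvex's own convention) differ from the POSIX numbering exposed by the standard library:
import os
print(os.SEEK_SET, os.SEEK_CUR, os.SEEK_END)  # 0 1 2 under POSIX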
|
|
328c373495e94005a97a907ac023cb52bc47e20c
|
find_dups.py
|
find_dups.py
|
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
f.write("You have currently registered the following to packages,\n")
f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
f.write("As a recent policy change, we are now rejecting this kind of\n")
f.write("setup. Please remove one of packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
Add script to email users of name-conflicting packages.
|
Add script to email users of name-conflicting packages.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@532 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1
|
Python
|
bsd-3-clause
|
ericholscher/pypi,ericholscher/pypi
|
Add script to email users of name-conflicting packages.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@532 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1
|
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
f.write("You have currently registered the following to packages,\n")
f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
f.write("As a recent policy change, we are now rejecting this kind of\n")
f.write("setup. Please remove one of packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
<commit_before><commit_msg>Add script to email users of name-conflicting packages.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@532 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1<commit_after>
|
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
f.write("You have currently registered the following to packages,\n")
f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
f.write("As a recent policy change, we are now rejecting this kind of\n")
f.write("setup. Please remove one of packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
Add script to email users of name-conflicting packages.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@532 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
f.write("You have currently registered the following to packages,\n")
f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
f.write("As a recent policy change, we are now rejecting this kind of\n")
f.write("setup. Please remove one of packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
|
<commit_before><commit_msg>Add script to email users of name-conflicting packages.
git-svn-id: 757818eefc3e095bf4f5c16d67ad3f55b5150c3d@532 072f9a9a-8cf7-0310-8ca5-bf92c90cb7c1<commit_after>import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt
import store, config
try:
config = config.Config(sys.argv[1], 'webui')
except IndexError:
print "Usage: find_dups.py config.ini"
raise SystemExit
store = store.Store(config)
store.open()
def owner_email(p):
result = set()
for r,u in store.get_package_roles(p):
if r == 'Owner':
result.add(store.get_user(u)['email'])
return result
def mail_dup(email, package1, package2):
email = "martin@v.loewis.de"
f = os.popen("/usr/lib/sendmail "+email, "w")
f.write("To: %s\n" % email)
f.write("From: martin@v.loewis.de\n")
f.write("Subject: Please cleanup PyPI package names\n\n")
f.write("Dear Package Owner,\n")
f.write("You have currently registered the following to packages,\n")
f.write("which differ only in case:\n\n%s\n%s\n\n" % (package1, package2))
f.write("As a recent policy change, we are now rejecting this kind of\n")
f.write("setup. Please remove one of packages.\n\n")
f.write("If you need assistance, please let me know.\n\n")
f.write("Kind regards,\nMartin v. Loewis\n")
f.close()
lower = {}
for name,version in store.get_packages():
lname = name.lower()
if lname in lower:
owner1 = owner_email(name)
owner2 = owner_email(lower[lname])
owners = owner1.intersection(owner2)
if owners:
mail_dup(owners.pop(),name,lower[lname])
else:
print "Distinct dup", name, lower[lname], owner1, owner2
lower[lname] = name
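The core case-insensitive duplicate scan can be exercised without a PyPI store; a minimal standalone sketch with hypothetical package names:
names = ["Foo", "foo", "Bar", "baz", "BAZ"]  # hypothetical package names
lower = {}
for name in names:
    key = name.lower()
    if key in lower:
        print("dup: %s vs %s" % (name, lower[key]))  # dup: foo vs Foo, dup: BAZ vs baz
    lower[key] = name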
|
|
c6928a6070aaf0fadfb1d9f4071a8440224a247a
|
tests/printInput.py
|
tests/printInput.py
|
#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
sys.exit("Usage:\n\t%s <testcase>" % (sys.argv[0],))
testcase = sys.argv[1]
exec(open(testcase).read())
for i in ['input']:
if i not in locals():
sys.exit("Testcase %s does not provide variable '%s'" % (testcase, i))
print(input)
|
Add script to extract input from testcases.
|
Add script to extract input from testcases.
|
Python
|
apache-2.0
|
alviano/wasp,alviano/wasp,alviano/wasp,Yarrick13/hwasp,gaste/dwasp,gaste/dwasp,Yarrick13/hwasp,gaste/dwasp,Yarrick13/hwasp
|
Add script to extract input from testcases.
|
#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
sys.exit("Usage:\n\t%s <testcase>" % (sys.argv[0],))
testcase = sys.argv[1]
exec(open(testcase).read())
for i in ['input']:
if i not in locals():
sys.exit("Testcase %s does not provide variable '%s'" % (testcase, i))
print(input)
|
<commit_before><commit_msg>Add script to extract input from testcases.<commit_after>
|
#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
sys.exit("Usage:\n\t%s <testcase>" % (sys.argv[0],))
testcase = sys.argv[1]
exec(open(testcase).read())
for i in ['input']:
if i not in locals():
sys.exit("Testcase %s does not provide variable '%s'" % (testcase, i))
print(input)
|
Add script to extract input from testcases.#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
sys.exit("Usage:\n\t%s <testcase>" % (sys.argv[0],))
testcase = sys.argv[1]
exec(open(testcase).read())
for i in ['input']:
if i not in locals():
sys.exit("Testcase %s does not provide variable '%s'" % (testcase, i))
print(input)
|
<commit_before><commit_msg>Add script to extract input from testcases.<commit_after>#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
sys.exit("Usage:\n\t%s <testcase>" % (sys.argv[0],))
testcase = sys.argv[1]
exec(open(testcase).read())
for i in ['input']:
if i not in locals():
sys.exit("Testcase %s does not provide variable '%s'" % (testcase, i))
print(input)
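For reference, a hypothetical testcase file this script could consume (the filename and the string content are assumptions; the script only requires that exec-ing the file define a variable named input):
# tests/example.test (hypothetical)
input = "contents handed to the solver under test"
Running ./printInput.py tests/example.test would then print that string.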
|
|
ff179b6215366676d92d39f255682781fe6b40eb
|
oscarapi/tests/testvoucher.py
|
oscarapi/tests/testvoucher.py
|
from django.utils import timezone
from oscar.core.loading import get_model
from oscarapi.tests.utils import APITest
Basket = get_model('basket', 'Basket')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
class VoucherTest(APITest):
fixtures = [
'product', 'productcategory', 'productattribute', 'productclass',
'productattributevalue', 'category', 'attributeoptiongroup',
'attributeoption', 'stockrecord', 'partner', 'voucher'
]
def setUp(self):
# Adjust offer dates so it's valid
offer = ConditionalOffer.objects.get(
name="Offer for voucher 'testvoucher'")
offer.start_datetime = timezone.now()
offer.end_datetime = timezone.now() + timezone.timedelta(days=1)
offer.save()
# adjust voucher dates for testing the view
voucher = Voucher.objects.get(name="testvoucher")
voucher.start_datetime = timezone.now()
voucher.end_datetime = timezone.now() + timezone.timedelta(days=1)
voucher.save()
super(VoucherTest, self).setUp()
def test_basket_add_voucher(self):
"""Check if we can add a voucher with the add-voucher api call"""
# first add two products to our basket
self.response = self.post(
'api-basket-add-product',
url="http://testserver/api/products/1/",
quantity=2)
self.response.assertStatusEqual(200)
# total should be 20
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '20.00')
# add a voucher and see if the voucher is added correctly
self.response = self.post(
'api-basket-add-voucher',
vouchercode='TESTVOUCHER')
self.response.assertStatusEqual(200)
# see if the discount of 5.00 from the voucher was applied
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '15.00')
|
Add a test for the api-basket-add-voucher view
|
Add a test for the api-basket-add-voucher view
|
Python
|
bsd-3-clause
|
regulusweb/django-oscar-api,crgwbr/django-oscar-api
|
Add a test for the api-basket-add-voucher view
|
from django.utils import timezone
from oscar.core.loading import get_model
from oscarapi.tests.utils import APITest
Basket = get_model('basket', 'Basket')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
class VoucherTest(APITest):
fixtures = [
'product', 'productcategory', 'productattribute', 'productclass',
'productattributevalue', 'category', 'attributeoptiongroup',
'attributeoption', 'stockrecord', 'partner', 'voucher'
]
def setUp(self):
# Adjust offer dates so it's valid
offer = ConditionalOffer.objects.get(
name="Offer for voucher 'testvoucher'")
offer.start_datetime = timezone.now()
offer.end_datetime = timezone.now() + timezone.timedelta(days=1)
offer.save()
# adjust voucher dates for testing the view
voucher = Voucher.objects.get(name="testvoucher")
voucher.start_datetime = timezone.now()
voucher.end_datetime = timezone.now() + timezone.timedelta(days=1)
voucher.save()
super(VoucherTest, self).setUp()
def test_basket_add_voucher(self):
"""Check if we can add a voucher with the add-voucher api call"""
# first add two products to our basket
self.response = self.post(
'api-basket-add-product',
url="http://testserver/api/products/1/",
quantity=2)
self.response.assertStatusEqual(200)
# total should be 20
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '20.00')
# add a voucher and see if the voucher is added correctly
self.response = self.post(
'api-basket-add-voucher',
vouchercode='TESTVOUCHER')
self.response.assertStatusEqual(200)
# see if the discount of 5.00 from the voucher was applied
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '15.00')
|
<commit_before><commit_msg>Add a test for the api-basket-add-voucher view<commit_after>
|
from django.utils import timezone
from oscar.core.loading import get_model
from oscarapi.tests.utils import APITest
Basket = get_model('basket', 'Basket')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
class VoucherTest(APITest):
fixtures = [
'product', 'productcategory', 'productattribute', 'productclass',
'productattributevalue', 'category', 'attributeoptiongroup',
'attributeoption', 'stockrecord', 'partner', 'voucher'
]
def setUp(self):
# Adjust offer dates so it's valid
offer = ConditionalOffer.objects.get(
name="Offer for voucher 'testvoucher'")
offer.start_datetime = timezone.now()
offer.end_datetime = timezone.now() + timezone.timedelta(days=1)
offer.save()
# adjust voucher dates for testing the view
voucher = Voucher.objects.get(name="testvoucher")
voucher.start_datetime = timezone.now()
voucher.end_datetime = timezone.now() + timezone.timedelta(days=1)
voucher.save()
super(VoucherTest, self).setUp()
def test_basket_add_voucher(self):
"""Check if we can add a voucher with the add-voucher api call"""
# first add two products to our basket
self.response = self.post(
'api-basket-add-product',
url="http://testserver/api/products/1/",
quantity=2)
self.response.assertStatusEqual(200)
# total should be 20
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '20.00')
# add a voucher and see if the voucher is added correctly
self.response = self.post(
'api-basket-add-voucher',
vouchercode='TESTVOUCHER')
self.response.assertStatusEqual(200)
# see if the discount of 5.00 from the voucher was applied
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '15.00')
|
Add a test for the api-basket-add-voucher viewfrom django.utils import timezone
from oscar.core.loading import get_model
from oscarapi.tests.utils import APITest
Basket = get_model('basket', 'Basket')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
class VoucherTest(APITest):
fixtures = [
'product', 'productcategory', 'productattribute', 'productclass',
'productattributevalue', 'category', 'attributeoptiongroup',
'attributeoption', 'stockrecord', 'partner', 'voucher'
]
def setUp(self):
# Adjust offer dates so it's valid
offer = ConditionalOffer.objects.get(
name="Offer for voucher 'testvoucher'")
offer.start_datetime = timezone.now()
offer.end_datetime = timezone.now() + timezone.timedelta(days=1)
offer.save()
# adjust voucher dates for testing the view
voucher = Voucher.objects.get(name="testvoucher")
voucher.start_datetime = timezone.now()
voucher.end_datetime = timezone.now() + timezone.timedelta(days=1)
voucher.save()
super(VoucherTest, self).setUp()
def test_basket_add_voucher(self):
"""Check if we can add a voucher with the add-voucher api call"""
# first add two products to our basket
self.response = self.post(
'api-basket-add-product',
url="http://testserver/api/products/1/",
quantity=2)
self.response.assertStatusEqual(200)
# total should be 20
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '20.00')
# add a voucher and see if the voucher is added correctly
self.response = self.post(
'api-basket-add-voucher',
vouchercode='TESTVOUCHER')
self.response.assertStatusEqual(200)
# see if the discount of 5.00 from the voucher was applied
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '15.00')
|
<commit_before><commit_msg>Add a test for the api-basket-add-voucher view<commit_after>from django.utils import timezone
from oscar.core.loading import get_model
from oscarapi.tests.utils import APITest
Basket = get_model('basket', 'Basket')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
class VoucherTest(APITest):
fixtures = [
'product', 'productcategory', 'productattribute', 'productclass',
'productattributevalue', 'category', 'attributeoptiongroup',
'attributeoption', 'stockrecord', 'partner', 'voucher'
]
def setUp(self):
# Adjust offer dates so it's valid
offer = ConditionalOffer.objects.get(
name="Offer for voucher 'testvoucher'")
offer.start_datetime = timezone.now()
offer.end_datetime = timezone.now() + timezone.timedelta(days=1)
offer.save()
# adjust voucher dates for testing the view
voucher = Voucher.objects.get(name="testvoucher")
voucher.start_datetime = timezone.now()
voucher.end_datetime = timezone.now() + timezone.timedelta(days=1)
voucher.save()
super(VoucherTest, self).setUp()
def test_basket_add_voucher(self):
"""Check if we can add a voucher with the add-voucher api call"""
# first add two products to our basket
self.response = self.post(
'api-basket-add-product',
url="http://testserver/api/products/1/",
quantity=2)
self.response.assertStatusEqual(200)
# total should be 20
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '20.00')
# add a voucher and see if the voucher is added correctly
self.response = self.post(
'api-basket-add-voucher',
vouchercode='TESTVOUCHER')
self.response.assertStatusEqual(200)
# see if the discount of 5.00 from the voucher was applied
self.response = self.get('api-basket')
self.response.assertValueEqual('total_incl_tax', '15.00')
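The expected totals follow from a unit price of 10.00 implied by the fixture (quantity 2 gives 20.00) minus the 5.00 voucher discount; the arithmetic, sketched with Decimal:
from decimal import Decimal
subtotal = 2 * Decimal("10.00")
print(subtotal)                    # 20.00
print(subtotal - Decimal("5.00"))  # 15.00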
|
|
dbaccbddfe36c32444c61df5ac6ffd238d3d022b
|
py/longest-word-in-dictionary.py
|
py/longest-word-in-dictionary.py
|
class Solution(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
words = sorted(words, key=lambda w:(len(w), w))
prefix_dict = set()
max_word = ''
for w in words:
if len(w) == 1 or w[:-1] in prefix_dict:
prefix_dict.add(w)
if len(w) > len(max_word) or w < max_word:
max_word = w
return max_word
|
Add py solution for 720. Longest Word in Dictionary
|
Add py solution for 720. Longest Word in Dictionary
720. Longest Word in Dictionary: https://leetcode.com/problems/longest-word-in-dictionary/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 720. Longest Word in Dictionary
720. Longest Word in Dictionary: https://leetcode.com/problems/longest-word-in-dictionary/
|
class Solution(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
words = sorted(words, key=lambda w:(len(w), w))
prefix_dict = set()
max_word = ''
for w in words:
if len(w) == 1 or w[:-1] in prefix_dict:
prefix_dict.add(w)
if len(w) > len(max_word) or w < max_word:
max_word = w
return max_word
|
<commit_before><commit_msg>Add py solution for 720. Longest Word in Dictionary
720. Longest Word in Dictionary: https://leetcode.com/problems/longest-word-in-dictionary/<commit_after>
|
class Solution(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
words = sorted(words, key=lambda w:(len(w), w))
prefix_dict = set()
max_word = ''
for w in words:
if len(w) == 1 or w[:-1] in prefix_dict:
prefix_dict.add(w)
if len(w) > len(max_word) or w < max_word:
max_word = w
return max_word
|
Add py solution for 720. Longest Word in Dictionary
720. Longest Word in Dictionary: https://leetcode.com/problems/longest-word-in-dictionary/class Solution(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
words = sorted(words, key=lambda w:(len(w), w))
prefix_dict = set()
max_word = ''
for w in words:
if len(w) == 1 or w[:-1] in prefix_dict:
prefix_dict.add(w)
if len(w) > len(max_word) or w < max_word:
max_word = w
return max_word
|
<commit_before><commit_msg>Add py solution for 720. Longest Word in Dictionary
720. Longest Word in Dictionary: https://leetcode.com/problems/longest-word-in-dictionary/<commit_after>class Solution(object):
def longestWord(self, words):
"""
:type words: List[str]
:rtype: str
"""
words = sorted(words, key=lambda w:(len(w), w))
prefix_dict = set()
max_word = ''
for w in words:
if len(w) == 1 or w[:-1] in prefix_dict:
prefix_dict.add(w)
if len(w) > len(max_word) or w < max_word:
max_word = w
return max_word
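A quick sanity check of the prefix-chain logic above, assuming the Solution class is importable:
sol = Solution()
print(sol.longestWord(["w", "wo", "wor", "worl", "world"]))  # expected: world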
|
|
e7be538e04e775ad63d0e7c54d7bea6b902e09fb
|
elo.py
|
elo.py
|
##
# elo.py
# provides implementation of ELO algorithm to determine
# change in fitness given a malicious act of nature.
##
K_FACTOR = 50 # weighting factor: the larger it is, the more significant each match is
# ^ in chess it's usually 15-16 for grandmasters and 32 for weak players
BETA = 400
class ResultType:
WIN = 1
LOSE = 2
DRAW = 3
'''
Returns a tuple (newELOYou, newELOThem)
example usage:
(2100NewElo, 2000NewElo) = get_elos_for_result(2100, 2000, ResultType.LOSE)
'''
def get_elos_for_result(eloYou, eloOpponent, result):
# get expected values
expectedYou = 1. / (1.+10.**(float(eloOpponent - eloYou)/BETA))
expectedThem = 1 - expectedYou # 1. / (1+10**((eloYou - eloOpponent)/BETA))
# actual scores
if result == ResultType.WIN:
actualYou = 1
elif result == ResultType.LOSE:
actualYou = 0
else:
actualYou = 0.5
actualThem = 1. - actualYou
newELOYou = eloYou + K_FACTOR * (actualYou - expectedYou)
newELOOpponent = eloOpponent + K_FACTOR * (actualThem - expectedThem)
return (newELOYou, newELOOpponent)
'''
Test to make sure elo is calculated correctly given the constants
'''
def test_elos():
def eloTuplesEqual(tupleA, tupleB):
return abs(tupleA[0] - tupleB[0]) <= 1 and abs(tupleA[1] - tupleB[1]) <= 1
assert eloTuplesEqual(get_elos_for_result(1200, 1200, ResultType.DRAW), (1200, 1200))
assert eloTuplesEqual(get_elos_for_result(800, 1400, ResultType.DRAW), (823, 1377))
assert eloTuplesEqual(get_elos_for_result(200, 2000, ResultType.WIN), (249, 1951))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.WIN), (2117, 1983))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.LOSE), (2067, 2033))
print "tests pass"
#test_elos()
|
Add ELO algorithm code, with small tests.
|
Add ELO algorithm code, with small tests.
|
Python
|
mit
|
anishathalye/evolution-chamber,anishathalye/evolution-chamber,techx/hackmit-evolution-chamber,anishathalye/evolution-chamber,anishathalye/evolution-chamber,techx/hackmit-evolution-chamber,techx/hackmit-evolution-chamber,techx/hackmit-evolution-chamber
|
Add ELO algorithm code, with small tests.
|
##
# elo.py
# provides implementation of ELO algorithm to determine
# change in fitness given a malicious act of nature.
##
K_FACTOR = 50 # weighting factor: the larger it is, the more significant each match is
# ^ in chess it's usually 15-16 for grandmasters and 32 for weak players
BETA = 400
class ResultType:
WIN = 1
LOSE = 2
DRAW = 3
'''
Returns a tuple (newELOYou, newELOThem)
example usage:
(newELOYou, newELOThem) = get_elos_for_result(2100, 2000, ResultType.LOSE)
'''
def get_elos_for_result(eloYou, eloOpponent, result):
# get expected values
expectedYou = 1. / (1.+10.**(float(eloOpponent - eloYou)/BETA))
expectedThem = 1 - expectedYou # 1. / (1+10**((eloYou - eloOpponent)/BETA))
# actual scores
if result == ResultType.WIN:
actualYou = 1
elif result == ResultType.LOSE:
actualYou = 0
else:
actualYou = 0.5
actualThem = 1. - actualYou
newELOYou = eloYou + K_FACTOR * (actualYou - expectedYou)
newELOOpponent = eloOpponent + K_FACTOR * (actualThem - expectedThem)
return (newELOYou, newELOOpponent)
'''
Test to make sure elo is calculated correctly given the constants
'''
def test_elos():
def eloTuplesEqual(tupleA, tupleB):
return abs(tupleA[0] - tupleB[0]) <= 1 and abs(tupleA[1] - tupleB[1]) <= 1
assert eloTuplesEqual(get_elos_for_result(1200, 1200, ResultType.DRAW), (1200, 1200))
assert eloTuplesEqual(get_elos_for_result(800, 1400, ResultType.DRAW), (823, 1377))
assert eloTuplesEqual(get_elos_for_result(200, 2000, ResultType.WIN), (249, 1951))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.WIN), (2117, 1983))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.LOSE), (2067, 2033))
print "tests pass"
#test_elos()
|
<commit_before><commit_msg>Add ELO algorithm code, with small tests.<commit_after>
|
##
# elo.py
# provides implementation of ELO algorithm to determine
# change in fitness given a malicious act of nature.
##
K_FACTOR = 50 # weighting factor: the larger it is, the more significant each match is
# ^ in chess it's usually 15-16 for grandmasters and 32 for weak players
BETA = 400
class ResultType:
WIN = 1
LOSE = 2
DRAW = 3
'''
Returns a tuple (newELOYou, newELOThem)
example usage:
(newELOYou, newELOThem) = get_elos_for_result(2100, 2000, ResultType.LOSE)
'''
def get_elos_for_result(eloYou, eloOpponent, result):
# get expected values
expectedYou = 1. / (1.+10.**(float(eloOpponent - eloYou)/BETA))
expectedThem = 1 - expectedYou # 1. / (1+10**((eloYou - eloOpponent)/BETA))
# actual scores
if result == ResultType.WIN:
actualYou = 1
elif result == ResultType.LOSE:
actualYou = 0
else:
actualYou = 0.5
actualThem = 1. - actualYou
newELOYou = eloYou + K_FACTOR * (actualYou - expectedYou)
newELOOpponent = eloOpponent + K_FACTOR * (actualThem - expectedThem)
return (newELOYou, newELOOpponent)
'''
Test to make sure elo is calculated correctly given the constants
'''
def test_elos():
def eloTuplesEqual(tupleA, tupleB):
return abs(tupleA[0] - tupleB[0]) <= 1 and abs(tupleA[1] - tupleB[1]) <= 1
assert eloTuplesEqual(get_elos_for_result(1200, 1200, ResultType.DRAW), (1200, 1200))
assert eloTuplesEqual(get_elos_for_result(800, 1400, ResultType.DRAW), (823, 1377))
assert eloTuplesEqual(get_elos_for_result(200, 2000, ResultType.WIN), (249, 1951))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.WIN), (2117, 1983))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.LOSE), (2067, 2033))
print "tests pass"
#test_elos()
|
Add ELO algorithm code, with small tests.##
# elo.py
# provides implementation of ELO algorithm to determine
# change in fitness given a malicious act of nature.
##
K_FACTOR = 50 # weighting factor. The larger the more significant a match
# ^ in chess it's usually 15-16 for grandmasters and 32 for weak players
BETA = 400
class ResultType:
WIN = 1
LOSE = 2
DRAW = 3
'''
Returns a tuple (newELOYou, newELOThem)
example usage:
(newEloYou, newEloThem) = get_elos_for_result(2100, 2000, ResultType.LOSE)
'''
def get_elos_for_result(eloYou, eloOpponent, result):
# get expected values
expectedYou = 1. / (1.+10.**(float(eloOpponent - eloYou)/BETA))
expectedThem = 1 - expectedYou # 1. / (1+10**((eloYou - eloOpponent)/BETA))
# actual scores
if result == ResultType.WIN:
actualYou = 1
elif result == ResultType.LOSE:
actualYou = 0
else:
actualYou = 0.5
actualThem = 1. - actualYou
newELOYou = eloYou + K_FACTOR * (actualYou - expectedYou)
newELOOpponent = eloOpponent + K_FACTOR * (actualThem - expectedThem)
return (newELOYou, newELOOpponent)
'''
Test to make sure elo calculated correctly given constants
'''
def test_elos():
def eloTuplesEqual(tupleA, tupleB):
return abs(tupleA[0] - tupleB[0]) <= 1 and abs(tupleA[1] - tupleB[1]) <= 1
assert eloTuplesEqual(get_elos_for_result(1200, 1200, ResultType.DRAW), (1200, 1200))
assert eloTuplesEqual(get_elos_for_result(800, 1400, ResultType.DRAW), (823, 1377))
assert eloTuplesEqual(get_elos_for_result(200, 2000, ResultType.WIN), (249, 1951))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.WIN), (2117, 1983))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.LOSE), (2067, 2033))
print "tests pass"
#test_elos()
|
<commit_before><commit_msg>Add ELO algorithm code, with small tests.<commit_after>##
# elo.py
# provides implementation of ELO algorithm to determine
# change in fitness given a malicious act of nature.
##
K_FACTOR = 50 # weighting factor. The larger the more significant a match
# ^ in chess it's usually 15-16 for grandmasters and 32 for weak players
BETA = 400
class ResultType:
WIN = 1
LOSE = 2
DRAW = 3
'''
Returns a tuple (newELOYou, newELOThem)
example usage:
(newEloYou, newEloThem) = get_elos_for_result(2100, 2000, ResultType.LOSE)
'''
def get_elos_for_result(eloYou, eloOpponent, result):
# get expected values
expectedYou = 1. / (1.+10.**(float(eloOpponent - eloYou)/BETA))
expectedThem = 1 - expectedYou # 1. / (1+10**((eloYou - eloOpponent)/BETA))
# actual scores
if result == ResultType.WIN:
actualYou = 1
elif result == ResultType.LOSE:
actualYou = 0
else:
actualYou = 0.5
actualThem = 1. - actualYou
newELOYou = eloYou + K_FACTOR * (actualYou - expectedYou)
newELOOpponent = eloOpponent + K_FACTOR * (actualThem - expectedThem)
return (newELOYou, newELOOpponent)
'''
Test to make sure elo calculated correctly given constants
'''
def test_elos():
def eloTuplesEqual(tupleA, tupleB):
return abs(tupleA[0] - tupleB[0]) <= 1 and abs(tupleA[1] - tupleB[1]) <= 1
assert eloTuplesEqual(get_elos_for_result(1200, 1200, ResultType.DRAW), (1200, 1200))
assert eloTuplesEqual(get_elos_for_result(800, 1400, ResultType.DRAW), (823, 1377))
assert eloTuplesEqual(get_elos_for_result(200, 2000, ResultType.WIN), (249, 1951))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.WIN), (2117, 1983))
assert eloTuplesEqual(get_elos_for_result(2100, 2000, ResultType.LOSE), (2067, 2033))
print "tests pass"
#test_elos()
|
|
4c1953e570a15661cffa68c151e00bbc6345ad8d
|
ipaqe_provision_hosts/backend/loader.py
|
ipaqe_provision_hosts/backend/loader.py
|
# Author: Milan Kubik, 2017
import logging
from pkg_resources import iter_entry_points
RESOURCE_GROUP = "ipaqe_provision_hosts.backends"
log = logging.getLogger(__name__)
def load_backends(exclude=()):
log.debug("Loading entry points from %s.", RESOURCE_GROUP)
entry_points = {
ep.name: ep.load() for ep in iter_entry_points(RESOURCE_GROUP)
if ep.name not in exclude
}
return entry_points
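For context on how load_backends discovers plugins: a backend package advertises itself under the ipaqe_provision_hosts.backends entry-point group in its setup.py. A minimal sketch follows; the package, module, and class names are hypothetical, not taken from the project:

from setuptools import setup

setup(
    name='my-ipaqe-backend',          # hypothetical plugin package
    py_modules=['my_backend'],
    entry_points={
        # same group name as RESOURCE_GROUP above
        'ipaqe_provision_hosts.backends': [
            'mybackend = my_backend:MyBackend',
        ],
    },
)

With such a package installed, load_backends() would return {'mybackend': MyBackend}, and load_backends(exclude=('mybackend',)) would skip it.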
|
Add basic backend loading function
|
Add basic backend loading function
|
Python
|
mit
|
apophys/ipaqe-provision-hosts
|
Add basic backend loading function
|
# Author: Milan Kubik, 2017
import logging
from pkg_resources import iter_entry_points
RESOURCE_GROUP = "ipaqe_provision_hosts.backends"
log = logging.getLogger(__name__)
def load_backends(exclude=()):
log.debug("Loading entry points from %s.", RESOURCE_GROUP)
entry_points = {
ep.name: ep.load() for ep in iter_entry_points(RESOURCE_GROUP)
if ep.name not in exclude
}
return entry_points
|
<commit_before><commit_msg>Add basic backend loading function<commit_after>
|
# Author: Milan Kubik, 2017
import logging
from pkg_resources import iter_entry_points
RESOURCE_GROUP = "ipaqe_provision_hosts.backends"
log = logging.getLogger(__name__)
def load_backends(exclude=()):
log.debug("Loading entry points from %s.", RESOURCE_GROUP)
entry_points = {
ep.name: ep.load() for ep in iter_entry_points(RESOURCE_GROUP)
if ep.name not in exclude
}
return entry_points
|
Add basic backend loading function# Author: Milan Kubik, 2017
import logging
from pkg_resources import iter_entry_points
RESOURCE_GROUP = "ipaqe_provision_hosts.backends"
log = logging.getLogger(__name__)
def load_backends(exclude=()):
log.debug("Loading entry points from %s.", RESOURCE_GROUP)
entry_points = {
ep.name: ep.load() for ep in iter_entry_points(RESOURCE_GROUP)
if ep.name not in exclude
}
return entry_points
|
<commit_before><commit_msg>Add basic backend loading function<commit_after># Author: Milan Kubik, 2017
import logging
from pkg_resources import iter_entry_points
RESOURCE_GROUP = "ipaqe_provision_hosts.backends"
log = logging.getLogger(__name__)
def load_backends(exclude=()):
log.debug("Loading entry points from %s.", RESOURCE_GROUP)
entry_points = {
ep.name: ep.load() for ep in iter_entry_points(RESOURCE_GROUP)
if ep.name not in exclude
}
return entry_points
|
|
cfc471de36961ff90a2131100fa7a87da69c656b
|
Motors/motorTest.py
|
Motors/motorTest.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT
import time
import serial
import numpy as np
def computeXORChecksum(chksumdata):
# Inspired from http://doschman.blogspot.fr/2013/01/calculating-nmea-sentence-checksums.html
# Initializing XOR counter
csum = 0
# For each char in chksumdata, XOR against the previous
# XOR char. The final XOR of the last char will be the
# checksum
for c in chksumdata:
# Makes XOR value of counter with the next char in line
# and stores the new XOR value in csum
csum ^= ord(c)
h = hex(csum)
return h[2:]#get hex data without 0x prefix
dt = 0.1
locations=['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
for device in locations:
try:
print "Trying...", device
ser = serial.Serial(device, 19200)
print "Connected on ", device
break
except:
print "Failed to connect on ", device
time.sleep(1.5)
ser.write('i1')
t0 = time.time()
n = 0
while True:
n = n+1
t = time.time()-t0
print "n= ", n, ", t= ", t
order = 0.3*np.sin(t)
alpha = np.round(order, 2)
msg = "ORPWM"+","+str(alpha)
msg = "$"+msg +"*"+ computeXORChecksum(msg) + chr(13).encode('ascii')
print msg
ser.write(msg)
try:
line = ser.readline()
print "Received from arduino: ", line
except Exception, e:
print("Error reading from serial port" + str(e))
time.sleep(dt)
ser.close()
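As a worked example of the NMEA-style framing above: for the hypothetical order value 0.3 the payload is "ORPWM,0.3", XOR-ing its characters yields 0x56, so the frame written to the serial port is "$ORPWM,0.3*56" followed by a carriage return:

>>> computeXORChecksum("ORPWM,0.3")
'56'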
|
Add a script to check motor (and communication!)
|
Add a script to check motor (and communication!)
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Add a script to check motor (and communication!)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT
import time
import serial
import numpy as np
def computeXORChecksum(chksumdata):
# Inspired from http://doschman.blogspot.fr/2013/01/calculating-nmea-sentence-checksums.html
# Initializing XOR counter
csum = 0
# For each char in chksumdata, XOR against the previous
# XOR char. The final XOR of the last char will be the
# checksum
for c in chksumdata:
# Makes XOR value of counter with the next char in line
# and stores the new XOR value in csum
csum ^= ord(c)
h = hex(csum)
return h[2:]#get hex data without 0x prefix
dt = 0.1
locations=['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
for device in locations:
try:
print "Trying...", device
ser = serial.Serial(device, 19200)
print "Connected on ", device
break
except:
print "Failed to connect on ", device
time.sleep(1.5)
ser.write('i1')
t0 = time.time()
n = 0
while True:
n = n+1
t = time.time()-t0
print "n= ", n, ", t= ", t
order = 0.3*np.sin(t)
alpha = np.round(order, 2)
msg = "ORPWM"+","+str(alpha)
msg = "$"+msg +"*"+ computeXORChecksum(msg) + chr(13).encode('ascii')
print msg
ser.write(msg)
try:
line = ser.readline()
print "Received from arduino: ", line
except Exception, e:
print("Error reading from serial port" + str(e))
time.sleep(dt)
ser.close()
|
<commit_before><commit_msg>Add a script to check motor (and communication!)<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT
import time
import serial
import numpy as np
def computeXORChecksum(chksumdata):
# Inspired from http://doschman.blogspot.fr/2013/01/calculating-nmea-sentence-checksums.html
# Initializing XOR counter
csum = 0
# For each char in chksumdata, XOR against the previous
# XOR char. The final XOR of the last char will be the
# checksum
for c in chksumdata:
# Makes XOR value of counter with the next char in line
# and stores the new XOR value in csum
csum ^= ord(c)
h = hex(csum)
return h[2:]#get hex data without 0x prefix
dt = 0.1
locations=['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
for device in locations:
try:
print "Trying...", device
ser = serial.Serial(device, 19200)
print "Connected on ", device
break
except:
print "Failed to connect on ", device
time.sleep(1.5)
ser.write('i1')
t0 = time.time()
n = 0
while True:
n = n+1
t = time.time()-t0
print "n= ", n, ", t= ", t
order = 0.3*np.sin(t)
alpha = np.round(order, 2)
msg = "ORPWM"+","+str(alpha)
msg = "$"+msg +"*"+ computeXORChecksum(msg) + chr(13).encode('ascii')
print msg
ser.write(msg)
try:
line = ser.readline()
print "Received from arduino: ", line
except Exception, e:
print("Error reading from serial port" + str(e))
time.sleep(dt)
ser.close()
|
Add a script to check motor (and communication!)#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT
import time
import serial
import numpy as np
def computeXORChecksum(chksumdata):
# Inspired from http://doschman.blogspot.fr/2013/01/calculating-nmea-sentence-checksums.html
# Initializing XOR counter
csum = 0
# For each char in chksumdata, XOR against the previous
# XOR char. The final XOR of the last char will be the
# checksum
for c in chksumdata:
# Makes XOR value of counter with the next char in line
# and stores the new XOR value in csum
csum ^= ord(c)
h = hex(csum)
return h[2:]#get hex data without 0x prefix
dt = 0.1
locations=['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
for device in locations:
try:
print "Trying...", device
ser = serial.Serial(device, 19200)
print "Connected on ", device
break
except:
print "Failed to connect on ", device
time.sleep(1.5)
ser.write('i1')
t0 = time.time()
n = 0
while True:
n = n+1
t = time.time()-t0
print "n= ", n, ", t= ", t
order = 0.3*np.sin(t)
alpha = np.round(order, 2)
msg = "ORPWM"+","+str(alpha)
msg = "$"+msg +"*"+ computeXORChecksum(msg) + chr(13).encode('ascii')
print msg
ser.write(msg)
try:
line = ser.readline()
print "Received from arduino: ", line
except Exception, e:
print("Error reading from serial port" + str(e))
time.sleep(dt)
ser.close()
|
<commit_before><commit_msg>Add a script to check motor (and communication!)<commit_after>#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT
import time
import serial
import numpy as np
def computeXORChecksum(chksumdata):
# Inspired from http://doschman.blogspot.fr/2013/01/calculating-nmea-sentence-checksums.html
# Initializing XOR counter
csum = 0
# For each char in chksumdata, XOR against the previous
# XOR char. The final XOR of the last char will be the
# checksum
for c in chksumdata:
# Makes XOR value of counter with the next char in line
# and stores the new XOR value in csum
csum ^= ord(c)
h = hex(csum)
return h[2:]#get hex data without 0x prefix
dt = 0.1
locations=['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
for device in locations:
try:
print "Trying...", device
ser = serial.Serial(device, 19200)
print "Connected on ", device
break
except:
print "Failed to connect on ", device
time.sleep(1.5)
ser.write('i1')
t0 = time.time()
n = 0
while True:
n = n+1
t = time.time()-t0
print "n= ", n, ", t= ", t
order = 0.3*np.sin(t)
alpha = np.round(order, 2)
msg = "ORPWM"+","+str(alpha)
msg = "$"+msg +"*"+ computeXORChecksum(msg) + chr(13).encode('ascii')
print msg
ser.write(msg)
try:
line = ser.readline()
print "Received from arduino: ", line
except Exception, e:
print("Error reading from serial port" + str(e))
time.sleep(dt)
ser.close()
|
|
f3984fd4fdcfa8ddad779fb934362d043a5f8a00
|
tests/test_cli.py
|
tests/test_cli.py
|
#!/usr/bin/env python
import pytest
from click.testing import CliRunner
from compaction import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main, ["--help"])
assert result.exit_code == 0
result = runner.invoke(cli.main, ["--version"])
assert result.exit_code == 0
assert "version" in result.output
|
Add a simple test for the command line interface.
|
Add a simple test for the command line interface.
|
Python
|
mit
|
mcflugen/compaction
|
Add a simple test for the command line interface.
|
#!/usr/bin/env python
import pytest
from click.testing import CliRunner
from compaction import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main, ["--help"])
assert result.exit_code == 0
result = runner.invoke(cli.main, ["--version"])
assert result.exit_code == 0
assert "version" in result.output
|
<commit_before><commit_msg>Add a simple test for the command line interface.<commit_after>
|
#!/usr/bin/env python
import pytest
from click.testing import CliRunner
from compaction import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main, ["--help"])
assert result.exit_code == 0
result = runner.invoke(cli.main, ["--version"])
assert result.exit_code == 0
assert "version" in result.output
|
Add a simple test for the command line interface.#!/usr/bin/env python
import pytest
from click.testing import CliRunner
from compaction import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main, ["--help"])
assert result.exit_code == 0
result = runner.invoke(cli.main, ["--version"])
assert result.exit_code == 0
assert "version" in result.output
|
<commit_before><commit_msg>Add a simple test for the command line interface.<commit_after>#!/usr/bin/env python
import pytest
from click.testing import CliRunner
from compaction import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main, ["--help"])
assert result.exit_code == 0
result = runner.invoke(cli.main, ["--version"])
assert result.exit_code == 0
assert "version" in result.output
|
|
9397ebff413e557fff6aa08a3160cdfecb74fc7e
|
pybo/demos/basic_pes.py
|
pybo/demos/basic_pes.py
|
import numpy as np
import benchfunk
import reggie
import mwhutils.plotting as mp
import mwhutils.grid as mg
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
if __name__ == '__main__':
# grab a test function and points at which to plot things
s = 0.001
model = reggie.make_gp(s, 1.1, 0.05, 0)
bounds = [[0, 5]]
f = benchfunk.PriorFunction(model, bounds, 100)
x = mg.regular(bounds, 500)
# get initial data
X = inits.init_latin(bounds, 1)
Y = np.array([f(x_) for x_ in X])
# initialize the model
model.add_data(X, Y)
while True:
xbest = recommenders.best_latent(model, bounds)
index = policies.PES(model, bounds)(x)
xnext = x[index.argmax()]
# get the posterior at test points
mu, s2 = model.predict(x)
# create a figure and hold it
fig = mp.figure(num=1, rows=2)
fig.hold()
# plot the posterior
fig[0].plot_banded(x.ravel(), mu, 2*np.sqrt(s2))
fig[0].plot(x.ravel(), f.get_f(x))
fig[0].scatter(model.data[0].ravel(), model.data[1])
fig[0].vline(xbest)
# plot the acquisition function
fig[1].plot_banded(x.ravel(), index)
fig[1].vline(xnext)
# draw
fig.draw()
model.add_data(xnext, f(xnext))
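One note on the posterior plot above: plot_banded draws the predictive mean with a two-standard-deviation band,

\[ \mu(x) \pm 2\sqrt{\sigma^2(x)}, \]

i.e. roughly a 95% credible interval for the latent function under the Gaussian posterior returned by model.predict.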
|
Add a PES basic demo.
|
Add a PES basic demo.
|
Python
|
bsd-2-clause
|
mwhoffman/pybo
|
Add a PES basic demo.
|
import numpy as np
import benchfunk
import reggie
import mwhutils.plotting as mp
import mwhutils.grid as mg
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
if __name__ == '__main__':
# grab a test function and points at which to plot things
s = 0.001
model = reggie.make_gp(s, 1.1, 0.05, 0)
bounds = [[0, 5]]
f = benchfunk.PriorFunction(model, bounds, 100)
x = mg.regular(bounds, 500)
# get initial data
X = inits.init_latin(bounds, 1)
Y = np.array([f(x_) for x_ in X])
# initialize the model
model.add_data(X, Y)
while True:
xbest = recommenders.best_latent(model, bounds)
index = policies.PES(model, bounds)(x)
xnext = x[index.argmax()]
# get the posterior at test points
mu, s2 = model.predict(x)
# create a figure and hold it
fig = mp.figure(num=1, rows=2)
fig.hold()
# plot the posterior
fig[0].plot_banded(x.ravel(), mu, 2*np.sqrt(s2))
fig[0].plot(x.ravel(), f.get_f(x))
fig[0].scatter(model.data[0].ravel(), model.data[1])
fig[0].vline(xbest)
# plot the acquisition function
fig[1].plot_banded(x.ravel(), index)
fig[1].vline(xnext)
# draw
fig.draw()
model.add_data(xnext, f(xnext))
|
<commit_before><commit_msg>Add a PES basic demo.<commit_after>
|
import numpy as np
import benchfunk
import reggie
import mwhutils.plotting as mp
import mwhutils.grid as mg
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
if __name__ == '__main__':
# grab a test function and points at which to plot things
s = 0.001
model = reggie.make_gp(s, 1.1, 0.05, 0)
bounds = [[0, 5]]
f = benchfunk.PriorFunction(model, bounds, 100)
x = mg.regular(bounds, 500)
# get initial data
X = inits.init_latin(bounds, 1)
Y = np.array([f(x_) for x_ in X])
# initialize the model
model.add_data(X, Y)
while True:
xbest = recommenders.best_latent(model, bounds)
index = policies.PES(model, bounds)(x)
xnext = x[index.argmax()]
# get the posterior at test points
mu, s2 = model.predict(x)
# create a figure and hold it
fig = mp.figure(num=1, rows=2)
fig.hold()
# plot the posterior
fig[0].plot_banded(x.ravel(), mu, 2*np.sqrt(s2))
fig[0].plot(x.ravel(), f.get_f(x))
fig[0].scatter(model.data[0].ravel(), model.data[1])
fig[0].vline(xbest)
# plot the acquisition function
fig[1].plot_banded(x.ravel(), index)
fig[1].vline(xnext)
# draw
fig.draw()
model.add_data(xnext, f(xnext))
|
Add a PES basic demo.import numpy as np
import benchfunk
import reggie
import mwhutils.plotting as mp
import mwhutils.grid as mg
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
if __name__ == '__main__':
# grab a test function and points at which to plot things
s = 0.001
model = reggie.make_gp(s, 1.1, 0.05, 0)
bounds = [[0, 5]]
f = benchfunk.PriorFunction(model, bounds, 100)
x = mg.regular(bounds, 500)
# get initial data
X = inits.init_latin(bounds, 1)
Y = np.array([f(x_) for x_ in X])
# initialize the model
model.add_data(X, Y)
while True:
xbest = recommenders.best_latent(model, bounds)
index = policies.PES(model, bounds)(x)
xnext = x[index.argmax()]
# get the posterior at test points
mu, s2 = model.predict(x)
# create a figure and hold it
fig = mp.figure(num=1, rows=2)
fig.hold()
# plot the posterior
fig[0].plot_banded(x.ravel(), mu, 2*np.sqrt(s2))
fig[0].plot(x.ravel(), f.get_f(x))
fig[0].scatter(model.data[0].ravel(), model.data[1])
fig[0].vline(xbest)
# plot the acquisition function
fig[1].plot_banded(x.ravel(), index)
fig[1].vline(xnext)
# draw
fig.draw()
model.add_data(xnext, f(xnext))
|
<commit_before><commit_msg>Add a PES basic demo.<commit_after>import numpy as np
import benchfunk
import reggie
import mwhutils.plotting as mp
import mwhutils.grid as mg
from pybo import inits
from pybo import policies
from pybo import solvers
from pybo import recommenders
if __name__ == '__main__':
# grab a test function and points at which to plot things
s = 0.001
model = reggie.make_gp(s, 1.1, 0.05, 0)
bounds = [[0, 5]]
f = benchfunk.PriorFunction(model, bounds, 100)
x = mg.regular(bounds, 500)
# get initial data
X = inits.init_latin(bounds, 1)
Y = np.array([f(x_) for x_ in X])
# initialize the model
model.add_data(X, Y)
while True:
xbest = recommenders.best_latent(model, bounds)
index = policies.PES(model, bounds)(x)
xnext = x[index.argmax()]
# get the posterior at test points
mu, s2 = model.predict(x)
# create a figure and hold it
fig = mp.figure(num=1, rows=2)
fig.hold()
# plot the posterior
fig[0].plot_banded(x.ravel(), mu, 2*np.sqrt(s2))
fig[0].plot(x.ravel(), f.get_f(x))
fig[0].scatter(model.data[0].ravel(), model.data[1])
fig[0].vline(xbest)
# plot the acquisition function
fig[1].plot_banded(x.ravel(), index)
fig[1].vline(xnext)
# draw
fig.draw()
model.add_data(xnext, f(xnext))
|
|
5516b9997671d03c06549dcd99611df56d79b779
|
cors_webserver.py
|
cors_webserver.py
|
#!/usr/bin/env python
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple web server serving local files that permits cross-origin requests.
This can be used to view local data with Neuroglancer.
WARNING: Because this web server permits cross-origin requests, it exposes any
data in the directory that is served to any web page running on a machine that
can connect to the web server.
"""
from __future__ import print_function, absolute_import
import argparse
import os
import sys
try:
# Python3 and Python2 with future package.
from http.server import SimpleHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class RequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
class Server(HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, RequestHandler)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--port', default=9000, help='TCP port to listen on')
ap.add_argument('-a', '--bind', default='127.0.0.1', help='Bind address')
ap.add_argument('-d', '--directory', default='.', help='Directory to serve')
args = ap.parse_args()
os.chdir(args.directory)
server = Server((args.bind, args.port))
sa = server.socket.getsockname()
print("Serving directory %s at http://%s:%d" % (os.getcwd(), sa[0], sa[1]))
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit(0)
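A quick client-side check that the permissive header is being sent (this assumes the server above is running with its defaults and that the requests package is available):

import requests

# HEAD request against the default bind address and port
r = requests.head('http://127.0.0.1:9000/')
print(r.headers.get('Access-Control-Allow-Origin'))   # expected: *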
|
Add simple web server for testing Neuroglancer with local data
|
feat: Add simple web server for testing Neuroglancer with local data
|
Python
|
apache-2.0
|
seung-lab/neuroglancer,seung-lab/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,seung-lab/neuroglancer,google/neuroglancer,seung-lab/neuroglancer,seung-lab/neuroglancer,google/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,janelia-flyem/neuroglancer,seung-lab/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,google/neuroglancer,seung-lab/neuroglancer,google/neuroglancer
|
feat: Add simple web server for testing Neuroglancer with local data
|
#!/usr/bin/env python
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple web server serving local files that permits cross-origin requests.
This can be used to view local data with Neuroglancer.
WARNING: Because this web server permits cross-origin requests, it exposes any
data in the directory that is served to any web page running on a machine that
can connect to the web server.
"""
from __future__ import print_function, absolute_import
import argparse
import os
import sys
try:
# Python3 and Python2 with future package.
from http.server import SimpleHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class RequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
class Server(HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, RequestHandler)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--port', default=9000, help='TCP port to listen on')
ap.add_argument('-a', '--bind', default='127.0.0.1', help='Bind address')
ap.add_argument('-d', '--directory', default='.', help='Directory to serve')
args = ap.parse_args()
os.chdir(args.directory)
server = Server((args.bind, args.port))
sa = server.socket.getsockname()
print("Serving directory %s at http://%s:%d" % (os.getcwd(), sa[0], sa[1]))
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit(0)
|
<commit_before><commit_msg>feat: Add simple web server for testing Neuroglancer with local data<commit_after>
|
#!/usr/bin/env python
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple web server serving local files that permits cross-origin requests.
This can be used to view local data with Neuroglancer.
WARNING: Because this web server permits cross-origin requests, it exposes any
data in the directory that is served to any web page running on a machine that
can connect to the web server.
"""
from __future__ import print_function, absolute_import
import argparse
import os
import sys
try:
# Python3 and Python2 with future package.
from http.server import SimpleHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class RequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
class Server(HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, RequestHandler)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--port', default=9000, help='TCP port to listen on')
ap.add_argument('-a', '--bind', default='127.0.0.1', help='Bind address')
ap.add_argument('-d', '--directory', default='.', help='Directory to serve')
args = ap.parse_args()
os.chdir(args.directory)
server = Server((args.bind, args.port))
sa = server.socket.getsockname()
print("Serving directory %s at http://%s:%d" % (os.getcwd(), sa[0], sa[1]))
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit(0)
|
feat: Add simple web server for testing Neuroglancer with local data#!/usr/bin/env python
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple web server serving local files that permits cross-origin requests.
This can be used to view local data with Neuroglancer.
WARNING: Because this web server permits cross-origin requests, it exposes any
data in the directory that is served to any web page running on a machine that
can connect to the web server.
"""
from __future__ import print_function, absolute_import
import argparse
import os
import sys
try:
# Python3 and Python2 with future package.
from http.server import SimpleHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class RequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
class Server(HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, RequestHandler)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--port', default=9000, help='TCP port to listen on')
ap.add_argument('-a', '--bind', default='127.0.0.1', help='Bind address')
ap.add_argument('-d', '--directory', default='.', help='Directory to serve')
args = ap.parse_args()
os.chdir(args.directory)
server = Server((args.bind, args.port))
sa = server.socket.getsockname()
print("Serving directory %s at http://%s:%d" % (os.getcwd(), sa[0], sa[1]))
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit(0)
|
<commit_before><commit_msg>feat: Add simple web server for testing Neuroglancer with local data<commit_after>#!/usr/bin/env python
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple web server serving local files that permits cross-origin requests.
This can be used to view local data with Neuroglancer.
WARNING: Because this web server permits cross-origin requests, it exposes any
data in the directory that is served to any web page running on a machine that
can connect to the web server.
"""
from __future__ import print_function, absolute_import
import argparse
import os
import sys
try:
# Python3 and Python2 with future package.
from http.server import SimpleHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class RequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
SimpleHTTPRequestHandler.end_headers(self)
class Server(HTTPServer):
protocol_version = 'HTTP/1.1'
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, RequestHandler)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--port', default=9000, help='TCP port to listen on')
ap.add_argument('-a', '--bind', default='127.0.0.1', help='Bind address')
ap.add_argument('-d', '--directory', default='.', help='Directory to serve')
args = ap.parse_args()
os.chdir(args.directory)
server = Server((args.bind, args.port))
sa = server.socket.getsockname()
print("Serving directory %s at http://%s:%d" % (os.getcwd(), sa[0], sa[1]))
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit(0)
|
|
9a6c467ef34596c8adccc294f3132fe67c983608
|
scripts/copy_overlay.py
|
scripts/copy_overlay.py
|
"""Copy overlay from one dataset to a derived dataset."""
import click
import dtoolcore
def ensure_uri(path_or_uri):
if ':' in path_or_uri:
return path_or_uri
else:
return "disk:{}".format(path_or_uri)
@click.command()
@click.argument('src_dataset_path')
@click.argument('dst_dataset_path')
@click.argument('overlay_name')
def main(src_dataset_path, dst_dataset_path, overlay_name):
src_uri = ensure_uri(src_dataset_path)
dst_uri = ensure_uri(dst_dataset_path)
src_dataset = dtoolcore.DataSet.from_uri(src_uri)
dst_dataset = dtoolcore.DataSet.from_uri(dst_uri)
src_overlay = src_dataset.get_overlay(overlay_name)
dst_overlay = {}
from_overlay = dst_dataset.get_overlay('from')
for dst_id in dst_dataset.identifiers:
src_id = from_overlay[dst_id]
dst_overlay[dst_id] = src_overlay[src_id]
dst_dataset.put_overlay(overlay_name, dst_overlay)
if __name__ == '__main__':
main()
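The ensure_uri helper simply defaults bare paths to the disk scheme; anything already containing a colon passes through untouched:

>>> ensure_uri('my_dataset')               # bare path gains the disk: prefix
'disk:my_dataset'
>>> ensure_uri('s3:my-bucket/my_dataset')  # already scheme-qualified
's3:my-bucket/my_dataset'

Note the test is only for the presence of ':', so a Windows drive path such as C:\data would also be treated as already qualified.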
|
Add script to copy overlay from one dataset to another
|
Add script to copy overlay from one dataset to another
|
Python
|
mit
|
JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field,JIC-Image-Analysis/senescence-in-field
|
Add script to copy overlay from one dataset to another
|
"""Copy overlay from one dataset to a derived dataset."""
import click
import dtoolcore
def ensure_uri(path_or_uri):
if ':' in path_or_uri:
return path_or_uri
else:
return "disk:{}".format(path_or_uri)
@click.command()
@click.argument('src_dataset_path')
@click.argument('dst_dataset_path')
@click.argument('overlay_name')
def main(src_dataset_path, dst_dataset_path, overlay_name):
src_uri = ensure_uri(src_dataset_path)
dst_uri = ensure_uri(dst_dataset_path)
src_dataset = dtoolcore.DataSet.from_uri(src_uri)
dst_dataset = dtoolcore.DataSet.from_uri(dst_uri)
src_overlay = src_dataset.get_overlay(overlay_name)
dst_overlay = {}
from_overlay = dst_dataset.get_overlay('from')
for dst_id in dst_dataset.identifiers:
src_id = from_overlay[dst_id]
dst_overlay[dst_id] = src_overlay[src_id]
dst_dataset.put_overlay(overlay_name, dst_overlay)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to copy overlay from one dataset to another<commit_after>
|
"""Copy overlay from one dataset to a derived dataset."""
import click
import dtoolcore
def ensure_uri(path_or_uri):
if ':' in path_or_uri:
return path_or_uri
else:
return "disk:{}".format(path_or_uri)
@click.command()
@click.argument('src_dataset_path')
@click.argument('dst_dataset_path')
@click.argument('overlay_name')
def main(src_dataset_path, dst_dataset_path, overlay_name):
src_uri = ensure_uri(src_dataset_path)
dst_uri = ensure_uri(dst_dataset_path)
src_dataset = dtoolcore.DataSet.from_uri(src_uri)
dst_dataset = dtoolcore.DataSet.from_uri(dst_uri)
src_overlay = src_dataset.get_overlay(overlay_name)
dst_overlay = {}
from_overlay = dst_dataset.get_overlay('from')
for dst_id in dst_dataset.identifiers:
src_id = from_overlay[dst_id]
dst_overlay[dst_id] = src_overlay[src_id]
dst_dataset.put_overlay(overlay_name, dst_overlay)
if __name__ == '__main__':
main()
|
Add script to copy overlay from one dataset to another"""Copy overlay from one dataset to a derived dataset."""
import click
import dtoolcore
def ensure_uri(path_or_uri):
if ':' in path_or_uri:
return path_or_uri
else:
return "disk:{}".format(path_or_uri)
@click.command()
@click.argument('src_dataset_path')
@click.argument('dst_dataset_path')
@click.argument('overlay_name')
def main(src_dataset_path, dst_dataset_path, overlay_name):
src_uri = ensure_uri(src_dataset_path)
dst_uri = ensure_uri(dst_dataset_path)
src_dataset = dtoolcore.DataSet.from_uri(src_uri)
dst_dataset = dtoolcore.DataSet.from_uri(dst_uri)
src_overlay = src_dataset.get_overlay(overlay_name)
dst_overlay = {}
from_overlay = dst_dataset.get_overlay('from')
for dst_id in dst_dataset.identifiers:
src_id = from_overlay[dst_id]
dst_overlay[dst_id] = src_overlay[src_id]
dst_dataset.put_overlay(overlay_name, dst_overlay)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to copy overlay from one dataset to another<commit_after>"""Copy overlay from one dataset to a derived dataset."""
import click
import dtoolcore
def ensure_uri(path_or_uri):
if ':' in path_or_uri:
return path_or_uri
else:
return "disk:{}".format(path_or_uri)
@click.command()
@click.argument('src_dataset_path')
@click.argument('dst_dataset_path')
@click.argument('overlay_name')
def main(src_dataset_path, dst_dataset_path, overlay_name):
src_uri = ensure_uri(src_dataset_path)
dst_uri = ensure_uri(dst_dataset_path)
src_dataset = dtoolcore.DataSet.from_uri(src_uri)
dst_dataset = dtoolcore.DataSet.from_uri(dst_uri)
src_overlay = src_dataset.get_overlay(overlay_name)
dst_overlay = {}
from_overlay = dst_dataset.get_overlay('from')
for dst_id in dst_dataset.identifiers:
src_id = from_overlay[dst_id]
dst_overlay[dst_id] = src_overlay[src_id]
dst_dataset.put_overlay(overlay_name, dst_overlay)
if __name__ == '__main__':
main()
|
|
63203ab1069999f81b9518c11da29eb8ca537077
|
openstack_dashboard/api/__init__.py
|
openstack_dashboard/api/__init__.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects that are in many cases objects with
attributes that are direct maps to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
import base
import cinder
import glance
import keystone
import network
import nova
import quantum
import swift
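As an illustration of the wrapping convention the docstring describes, a wrapper in this style exposes only an explicit attribute whitelist over the raw API object. This is a generic sketch, not the actual base classes defined in the base module imported above:

class ExampleWrapper(object):
    """Expose only explicitly whitelisted attributes of a raw API object."""
    _attrs = ['id', 'name']              # hypothetical whitelist

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattr__(self, attr):
        if attr in self._attrs:
            return getattr(self._apiresource, attr)
        raise AttributeError(attr)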
|
Resolve API import issues (quantum)
|
Resolve API import issues (quantum)
Restore the imports in the api init file, without including the "from"
to ensure api calls still must specify "api.nova", "api.quantum", etc.
Fixes bug #1125632
Change-Id: I981105ce0ed7f1352de42fe2c0620665ba378823
|
Python
|
apache-2.0
|
vladryk/horizon,mrunge/openstack_horizon,redhat-openstack/horizon,citrix-openstack-build/horizon,Hodorable/0602,citrix-openstack-build/horizon,doug-fish/horizon,VaneCloud/horizon,tanglei528/horizon,karthik-suresh/horizon,j4/horizon,NeCTAR-RC/horizon,Solinea/horizon,wangxiangyu/horizon,luhanhan/horizon,newrocknj/horizon,icloudrnd/automation_tools,openstack/horizon,henaras/horizon,redhat-cip/horizon,zouyapeng/horizon,takeshineshiro/horizon,tuskar/tuskar-ui,eayunstack/horizon,redhat-cip/horizon,endorphinl/horizon,newrocknj/horizon,endorphinl/horizon-fork,karthik-suresh/horizon,tellesnobrega/horizon,j4/horizon,BiznetGIO/horizon,Dark-Hacker/horizon,mrunge/openstack_horizon,saydulk/horizon,henaras/horizon,damien-dg/horizon,zouyapeng/horizon,sandvine/horizon,endorphinl/horizon-fork,j4/horizon,philoniare/horizon,Tesora/tesora-horizon,wolverineav/horizon,ChameleonCloud/horizon,ikargis/horizon_fod,izadorozhna/dashboard_integration_tests,doug-fish/horizon,newrocknj/horizon,dan1/horizon-x509,Frostman/eho-horizon,RudoCris/horizon,damien-dg/horizon,kaiweifan/horizon,JioCloud/horizon,openstack/horizon,VaneCloud/horizon,yanheven/console,yanheven/console,NCI-Cloud/horizon,yeming233/horizon,noironetworks/horizon,agileblaze/OpenStackTwoFactorAuthentication,spring-week-topos/horizon-week,tanglei528/horizon,dan1/horizon-proto,Solinea/horizon,xme1226/horizon,CiscoSystems/avos,blueboxgroup/horizon,eayunstack/horizon,Metaswitch/horizon,Solinea/horizon,Dark-Hacker/horizon,icloudrnd/automation_tools,anthonydillon/horizon,wolverineav/horizon,endorphinl/horizon,mrunge/horizon_lib,saydulk/horizon,blueboxgroup/horizon,anthonydillon/horizon,liyitest/rr,flochaz/horizon,saydulk/horizon,xinwu/horizon,maestro-hybrid-cloud/horizon,davidcusatis/horizon,wangxiangyu/horizon,bigswitch/horizon,mrunge/openstack_horizon,kaiweifan/horizon,Daniex/horizon,liyitest/rr,gochist/horizon,doug-fish/horizon,FNST-OpenStack/horizon,yanheven/console,doug-fish/horizon,sandvine/horizon,rdo-management/tuskar-ui,tqtran7/horizon,henaras/horizon,tqtran7/horizon,takeshineshiro/horizon,citrix-openstack-build/horizon,luhanhan/horizon,ging/horizon,Hodorable/0602,tqtran7/horizon,Metaswitch/horizon,xinwu/horizon,nvoron23/avos,philoniare/horizon,Daniex/horizon,FNST-OpenStack/horizon,j4/horizon,ging/horizon,anthonydillon/horizon,gerrive/horizon,pranavtendolkr/horizon,nvoron23/avos,watonyweng/horizon,aaronorosen/horizon-congress,BiznetGIO/horizon,Solinea/horizon,yjxtogo/horizon,zouyapeng/horizon,dan1/horizon-proto,CiscoSystems/avos,froyobin/horizon,CiscoSystems/horizon,NeCTAR-RC/horizon,promptworks/horizon,idjaw/horizon,liyitest/rr,idjaw/horizon,froyobin/horizon,blueboxgroup/horizon,vladryk/horizon,endorphinl/horizon-fork,kaiweifan/horizon,watonyweng/horizon,ikargis/horizon_fod,vladryk/horizon,redhat-openstack/horizon,gerrive/horizon,davidcusatis/horizon,ChameleonCloud/horizon,CiscoSystems/avos,ChameleonCloud/horizon,Mirantis/mos-horizon,coreycb/horizon,flochaz/horizon,RudoCris/horizon,mdavid/horizon,Dark-Hacker/horizon,promptworks/horizon,spring-week-topos/horizon-week,izadorozhna/dashboard_integration_tests,dan1/horizon-proto,saydulk/horizon,tanglei528/horizon,davidcusatis/horizon,maestro-hybrid-cloud/horizon,dan1/horizon-x509,VaneCloud/horizon,dan1/horizon-proto,liyitest/rr,tellesnobrega/horizon,kfox1111/horizon,redhat-cip/horizon,CiscoSystems/horizon,coreycb/horizon,aaronorosen/horizon-congress,noironetworks/horizon,netscaler/horizon,Tesora/tesora-horizon,django-leonardo/horizon,JioCloud/horizon,bac/horizon,CiscoSystems/horizon,tsufiev/horizon,CiscoSystems
/avos,Tesora/tesora-horizon,anthonydillon/horizon,luhanhan/horizon,tellesnobrega/horizon,bac/horizon,kfox1111/horizon,Daniex/horizon,RudoCris/horizon,mandeepdhami/horizon,icloudrnd/automation_tools,nvoron23/avos,tellesnobrega/horizon,Hodorable/0602,tsufiev/horizon,yeming233/horizon,icloudrnd/automation_tools,pranavtendolkr/horizon,ikargis/horizon_fod,bac/horizon,philoniare/horizon,xme1226/horizon,bac/horizon,zouyapeng/horizon,aaronorosen/horizon-congress,RudoCris/horizon,mdavid/horizon,newrocknj/horizon,r-icarus/openstack_microserver,dan1/horizon-x509,yjxtogo/horizon,agileblaze/OpenStackTwoFactorAuthentication,noironetworks/horizon,tqtran7/horizon,mrunge/horizon,openstack-ja/horizon,gochist/horizon,Mirantis/mos-horizon,agileblaze/OpenStackTwoFactorAuthentication,maestro-hybrid-cloud/horizon,rdo-management/tuskar-ui,spring-week-topos/horizon-week,xinwu/horizon,Frostman/eho-horizon,nvoron23/avos,orbitfp7/horizon,eayunstack/horizon,Hodorable/0602,xinwu/horizon,rickerc/horizon_audit,vladryk/horizon,redhat-cip/horizon,luhanhan/horizon,watonyweng/horizon,tuskar/tuskar-ui,mdavid/horizon,promptworks/horizon,BiznetGIO/horizon,netscaler/horizon,coreycb/horizon,r-icarus/openstack_microserver,wangxiangyu/horizon,orbitfp7/horizon,openstack-ja/horizon,mandeepdhami/horizon,wolverineav/horizon,karthik-suresh/horizon,rickerc/horizon_audit,endorphinl/horizon-fork,dan1/horizon-x509,yjxtogo/horizon,yeming233/horizon,henaras/horizon,yjxtogo/horizon,BiznetGIO/horizon,django-leonardo/horizon,froyobin/horizon,ging/horizon,redhat-openstack/horizon,orbitfp7/horizon,watonyweng/horizon,VaneCloud/horizon,openstack/horizon,mandeepdhami/horizon,mdavid/horizon,Daniex/horizon,flochaz/horizon,netscaler/horizon,takeshineshiro/horizon,django-leonardo/horizon,blueboxgroup/horizon,orbitfp7/horizon,promptworks/horizon,tuskar/tuskar-ui,kfox1111/horizon,wolverineav/horizon,bigswitch/horizon,ChameleonCloud/horizon,idjaw/horizon,FNST-OpenStack/horizon,JioCloud/horizon,bigswitch/horizon,xme1226/horizon,philoniare/horizon,kfox1111/horizon,mrunge/horizon,FNST-OpenStack/horizon,mrunge/horizon_lib,NeCTAR-RC/horizon,rdo-management/tuskar-ui,takeshineshiro/horizon,Dark-Hacker/horizon,Mirantis/mos-horizon,ging/horizon,sandvine/horizon,openstack-ja/horizon,wangxiangyu/horizon,sandvine/horizon,maestro-hybrid-cloud/horizon,NCI-Cloud/horizon,Tesora/tesora-horizon,Metaswitch/horizon,flochaz/horizon,tsufiev/horizon,mrunge/horizon,gerrive/horizon,endorphinl/horizon,Mirantis/mos-horizon,rickerc/horizon_audit,django-leonardo/horizon,pranavtendolkr/horizon,yeming233/horizon,rdo-management/tuskar-ui,damien-dg/horizon,coreycb/horizon,damien-dg/horizon,CiscoSystems/horizon,r-icarus/openstack_microserver,NCI-Cloud/horizon,Metaswitch/horizon,tsufiev/horizon,gochist/horizon,NeCTAR-RC/horizon,gerrive/horizon,redhat-openstack/horizon,Frostman/eho-horizon,mandeepdhami/horizon,endorphinl/horizon,pranavtendolkr/horizon,openstack/horizon,noironetworks/horizon,karthik-suresh/horizon,NCI-Cloud/horizon,mrunge/horizon_lib,bigswitch/horizon,agileblaze/OpenStackTwoFactorAuthentication,davidcusatis/horizon,idjaw/horizon
|
Resolve API import issues (quantum)
Restore the imports in the api init file, without including the "from"
to ensure api calls still must specify "api.nova", "api.quantum", etc.
Fixes bug #1125632
Change-Id: I981105ce0ed7f1352de42fe2c0620665ba378823
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects that are in many cases objects with
attributes that are direct maps to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
import base
import cinder
import glance
import keystone
import network
import nova
import quantum
import swift
|
<commit_before><commit_msg>Resolve API import issues (quantum)
Restore the imports in the api init file, without including the "from"
to ensure api calls still must specify "api.nova", "api.quantum", etc.
Fixes bug #1125632
Change-Id: I981105ce0ed7f1352de42fe2c0620665ba378823<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects that are in many cases objects with
attributes that are direct maps to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
import base
import cinder
import glance
import keystone
import network
import nova
import quantum
import swift
|
Resolve API import issues (quantum)
Restore the imports in the api init file, without including the "from"
to ensure api calls still must specify "api.nova", "api.quantum", etc.
Fixes bug #1125632
Change-Id: I981105ce0ed7f1352de42fe2c0620665ba378823# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects that are in many cases objects with
attributes that are direct maps to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
import base
import cinder
import glance
import keystone
import network
import nova
import quantum
import swift
|
<commit_before><commit_msg>Resolve API import issues (quantum)
Restore the imports in the api init file, without including the "from"
to ensure api calls still must specify "api.nova", "api.quantum", etc.
Fixes bug #1125632
Change-Id: I981105ce0ed7f1352de42fe2c0620665ba378823<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Methods and interface objects used to interact with external APIs.
API method calls return objects that are in many cases objects with
attributes that are direct maps to the data returned from the API http call.
Unfortunately, these objects are also often constructed dynamically, making
it difficult to know what data is available from the API object. Because of
this, all API calls should wrap their returned object in one defined here,
using only explicitly defined attributes and/or methods.
In other words, Horizon developers not working on openstack_dashboard.api
shouldn't need to understand the finer details of APIs for
Keystone/Nova/Glance/Swift et al.
"""
import base
import cinder
import glance
import keystone
import network
import nova
import quantum
import swift
|
|
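A note on the import style in this record: plain `import nova`-style statements in the package `__init__` keep every call qualified (`api.nova.…`), which is the point of the fix. A minimal, runnable sketch of the same trade-off, using a standard-library package in place of the dashboard modules (the stdlib names are stand-ins, not part of the commit):

import os.path

# Bare import: callers must qualify the name, so its origin stays visible.
print(os.path.join("a", "b"))
# A star import would flatten the namespace and hide where names come from:
# from os.path import *   # join("a", "b") no longer says which module it is
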
6bf80a7f367593e71fad788a933819c6849c3723
|
day-06/p1.py
|
day-06/p1.py
|
import re
from numpy import matrix
instructions = []
with open('input.txt', 'r') as f:
regex = re.compile(r'([\w ]+) (\d+),(\d+) .+ (\d+),(\d+)')
for line in f:
match = regex.match(line)
instructions.append((
match.group(1),
(int(match.group(2)), int(match.group(3))),
(int(match.group(3)), int(match.group(5)))
))
lights = matrix([[0 for i in range(1000)] for j in range(1000)])
def act(instruction):
action = instruction[0]
x1, y1 = instruction[1]
x2, y2 = instruction[2]
if action == 'turn on':
lights[x1:x2, y1:y2] = 1
elif action == 'turn off':
lights[x1:x2, y1:y2] = 0
else:
lights[x1:x2, y1:y2] ^= 1
for step in instructions:
act(step)
print(lights.flatten().tolist()[0].count(1))
|
Add currently-incorrect day 6 part 1
|
Add currently-incorrect day 6 part 1
|
Python
|
mit
|
foxscotch/advent-of-code,foxscotch/advent-of-code
|
Add currently-incorrect day 6 part 1
|
import re
from numpy import matrix
instructions = []
with open('input.txt', 'r') as f:
regex = re.compile(r'([\w ]+) (\d+),(\d+) .+ (\d+),(\d+)')
for line in f:
match = regex.match(line)
instructions.append((
match.group(1),
(int(match.group(2)), int(match.group(3))),
(int(match.group(3)), int(match.group(5)))
))
lights = matrix([[0 for i in range(1000)] for j in range(1000)])
def act(instruction):
action = instruction[0]
x1, y1 = instruction[1]
x2, y2 = instruction[2]
if action == 'turn on':
lights[x1:x2, y1:y2] = 1
elif action == 'turn off':
lights[x1:x2, y1:y2] = 0
else:
lights[x1:x2, y1:y2] ^= 1
for step in instructions:
act(step)
print(lights.flatten().tolist()[0].count(1))
|
<commit_before><commit_msg>Add currently-incorrect day 6 part 1<commit_after>
|
import re
from numpy import matrix
instructions = []
with open('input.txt', 'r') as f:
regex = re.compile(r'([\w ]+) (\d+),(\d+) .+ (\d+),(\d+)')
for line in f:
match = regex.match(line)
instructions.append((
match.group(1),
(int(match.group(2)), int(match.group(3))),
(int(match.group(3)), int(match.group(5)))
))
lights = matrix([[0 for i in range(1000)] for j in range(1000)])
def act(instruction):
action = instruction[0]
x1, y1 = instruction[1]
x2, y2 = instruction[2]
if action == 'turn on':
lights[x1:x2, y1:y2] = 1
elif action == 'turn off':
lights[x1:x2, y1:y2] = 0
else:
lights[x1:x2, y1:y2] ^= 1
for step in instructions:
act(step)
print(lights.flatten().tolist()[0].count(1))
|
Add currently-incorrect day 6 part 1import re
from numpy import matrix
instructions = []
with open('input.txt', 'r') as f:
regex = re.compile(r'([\w ]+) (\d+),(\d+) .+ (\d+),(\d+)')
for line in f:
match = regex.match(line)
instructions.append((
match.group(1),
(int(match.group(2)), int(match.group(3))),
(int(match.group(3)), int(match.group(5)))
))
lights = matrix([[0 for i in range(1000)] for j in range(1000)])
def act(instruction):
action = instruction[0]
x1, y1 = instruction[1]
x2, y2 = instruction[2]
if action == 'turn on':
lights[x1:x2, y1:y2] = 1
elif action == 'turn off':
lights[x1:x2, y1:y2] = 0
else:
lights[x1:x2, y1:y2] ^= 1
for step in instructions:
act(step)
print(lights.flatten().tolist()[0].count(1))
|
<commit_before><commit_msg>Add currently-incorrect day 6 part 1<commit_after>import re
from numpy import matrix
instructions = []
with open('input.txt', 'r') as f:
regex = re.compile(r'([\w ]+) (\d+),(\d+) .+ (\d+),(\d+)')
for line in f:
match = regex.match(line)
instructions.append((
match.group(1),
(int(match.group(2)), int(match.group(3))),
(int(match.group(3)), int(match.group(5)))
))
lights = matrix([[0 for i in range(1000)] for j in range(1000)])
def act(instruction):
action = instruction[0]
x1, y1 = instruction[1]
x2, y2 = instruction[2]
if action == 'turn on':
lights[x1:x2, y1:y2] = 1
elif action == 'turn off':
lights[x1:x2, y1:y2] = 0
else:
lights[x1:x2, y1:y2] ^= 1
for step in instructions:
act(step)
print(lights.flatten().tolist()[0].count(1))
|
|
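The subject flags this solution as incorrect, and the likely defect is visible in the record: the end coordinate reuses `match.group(3)` where `match.group(4)` was presumably intended, and the NumPy slices stop short of the inclusive end coordinates the puzzle uses. A hedged sketch of the probable fix (the inclusive-range semantics are assumed from the puzzle, not stated in the commit):

import re

regex = re.compile(r'([\w ]+) (\d+),(\d+) .+ (\d+),(\d+)')
m = regex.match('turn on 0,0 through 2,2')
start = (int(m.group(2)), int(m.group(3)))
end = (int(m.group(4)), int(m.group(5)))  # group(4), not group(3) again
print(start, end)  # -> (0, 0) (2, 2)
# Inside act(), the puzzle ranges are inclusive, so the slices need a +1:
# lights[x1:x2 + 1, y1:y2 + 1] = 1
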
f9f7493d89aa842fffc085758a1fe5791cd65704
|
monitors/migrations/post/0004_migrate_subscriptions.py
|
monitors/migrations/post/0004_migrate_subscriptions.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.apps import apps
def populate_subscriptions(apps):
print("entering populate_subscriptions")
Certificates = apps.get_model("monitors","CertificateMonitor")
Subscription = apps.get_model("monitors","CertificateSubscription")
for certificate in Certificates.objects.all():
NewSubscription = Subscription(certificate=certificate, owner=certificate.owner)
NewSubscription.save()
class Migration(migrations.Migration):
dependencies = [
('monitors', '0003_certificatesubscription'),
]
operations = [
migrations.RunPython(populate_subscriptions(apps)),
]
|
Add script to migrate data to certificatesubscription table
|
Add script to migrate data to certificatesubscription table
|
Python
|
mit
|
gdit-cnd/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID,LindaTNguyen/RAPID,LindaTNguyen/RAPID,gdit-cnd/RAPID,gdit-cnd/RAPID,gdit-cnd/RAPID
|
Add script to migrate data to certificatesubscription table
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.apps import apps
def populate_subscriptions(apps):
print("entering populate_subscriptions")
Certificates = apps.get_model("monitors","CertificateMonitor")
Subscription = apps.get_model("monitors","CertificateSubscription")
for certificate in Certificates.objects.all():
NewSubscription = Subscription(certificate=certificate, owner=certificate.owner)
NewSubscription.save()
class Migration(migrations.Migration):
dependencies = [
('monitors', '0003_certificatesubscription'),
]
operations = [
migrations.RunPython(populate_subscriptions(apps)),
]
|
<commit_before><commit_msg>Add script to migrate data to certificatesubscription table<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.apps import apps
def populate_subscriptions(apps):
print("entering populate_subscriptions")
Certificates = apps.get_model("monitors","CertificateMonitor")
Subscription = apps.get_model("monitors","CertificateSubscription")
for certificate in Certificates.objects.all():
NewSubscription = Subscription(certificate=certificate, owner=certificate.owner)
NewSubscription.save()
class Migration(migrations.Migration):
dependencies = [
('monitors', '0003_certificatesubscription'),
]
operations = [
migrations.RunPython(populate_subscriptions(apps)),
]
|
Add script to migrate data to certificatesubscription table# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.apps import apps
def populate_subscriptions(apps):
print("entering populate_subscriptions")
Certificates = apps.get_model("monitors","CertificateMonitor")
Subscription = apps.get_model("monitors","CertificateSubscription")
for certificate in Certificates.objects.all():
NewSubscription = Subscription(certificate=certificate, owner=certificate.owner)
NewSubscription.save()
class Migration(migrations.Migration):
dependencies = [
('monitors', '0003_certificatesubscription'),
]
operations = [
migrations.RunPython(populate_subscriptions(apps)),
]
|
<commit_before><commit_msg>Add script to migrate data to certificatesubscription table<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.apps import apps
def populate_subscriptions(apps):
print("entering populate_subscriptions")
Certificates = apps.get_model("monitors","CertificateMonitor")
Subscription = apps.get_model("monitors","CertificateSubscription")
for certificate in Certificates.objects.all():
NewSubscription = Subscription(certificate=certificate, owner=certificate.owner)
NewSubscription.save()
class Migration(migrations.Migration):
dependencies = [
('monitors', '0003_certificatesubscription'),
]
operations = [
migrations.RunPython(populate_subscriptions(apps)),
]
|
|
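One thing worth flagging in this migration: `migrations.RunPython(populate_subscriptions(apps))` calls the function at import time and hands its return value (None) to RunPython, which expects a callable; Django also passes `(apps, schema_editor)` when the migration actually runs. A sketch of the conventional form, reusing the record's own names:

from django.db import migrations

def populate_subscriptions(apps, schema_editor):
    Certificates = apps.get_model("monitors", "CertificateMonitor")
    Subscription = apps.get_model("monitors", "CertificateSubscription")
    for certificate in Certificates.objects.all():
        Subscription(certificate=certificate, owner=certificate.owner).save()

class Migration(migrations.Migration):
    dependencies = [('monitors', '0003_certificatesubscription')]
    operations = [migrations.RunPython(populate_subscriptions)]
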
b614fae93ff965abb45ee26e1a72198b4b4af8ec
|
saltcloud/clouds/ec2.py
|
saltcloud/clouds/ec2.py
|
'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
# Import python libs
import os
import tempfile
import shutil
#
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import salt libs
import saltcloud.utils
import salt.crypt
def conn(vm_):
'''
Return a conn object for the passed vm data
'''
prov = 'EC2'
if 'location' in vm_:
prov += '_{0}'.format(vm_['location'])
elif 'location' in __opts__:
if __opts__['location']:
prov += '_{0}'.format(__opts__['location'])
if not hasattr(Provider, prov):
return None
driver = get_driver('EC2')
return driver(
__opts__['EC2_user'],
__opts__['EC2_key'],
)
def ssh_pub(vm_):
'''
Deploy the primary ssh authentication key
'''
ssh = ''
if 'ssh_auth' in vm_:
if not os.path.isfile(vm_['ssh_auth']):
return None
ssh = vm_['ssh_auth']
if not ssh:
if not os.path.isfile(__opts__['ssh_auth']):
return None
ssh = __opts__['ssh_auth']
return SSHKeyDeployment(open(os.path.expanduser(ssh)).read())
def script(vm_):
'''
Return the deployment object for managing a script
'''
os_ = ''
if 'os' in vm_:
os_ = vm_['os']
if not os_:
os_ = __opts__['os']
return ScriptDeployment(saltcloud.utils.os_script(os_))
def image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
if not 'image' in vm_:
return images[0]
if isinstance(vm_['image'], int):
return images[vm_['image']]
for img in images:
if img.id == vm_['image']:
return img
def size(conn, vm_):
'''
Return the vm's size object
'''
sizes = conn.list_sizes()
if not 'size' in vm_:
return sizes[0]
if isinstance(vm_['size'], int):
return sizes[vm_['size']]
for size in sizes:
if size.id == vm_['size']:
return size
if size.name == vm_['size']:
return size
def create(vm_):
'''
Create a single vm from a data dict
'''
connection = conn(vm_)
msd = MultiStepDeployment([ssh_pub(vm_), script(vm_)])
image = image(conn, vm_)
size = size(conn, vm_)
return conn.deploy_node(
name=vm_['name'],
image=image,
size=size,
deploy=msd)
|
Add initial testing module for cloud creation
|
Add initial testing module for cloud creation
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add initial testing module for cloud creation
|
'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
# Import python libs
import os
import tempfile
import shutil
#
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import salt libs
import saltcloud.utils
import salt.crypt
def conn(vm_):
'''
Return a conn object for the passed vm data
'''
prov = 'EC2'
if 'location' in vm_:
prov += '_{0}'.format(vm_['location'])
elif 'location' in __opts__:
if __opts__['location']:
prov += '_{0}'.format(__opts__['location'])
if not hasattr(Provider, prov):
return None
driver = get_driver('EC2')
return driver(
__opts__['EC2_user'],
__opts__['EC2_key'],
)
def ssh_pub(vm_):
'''
Deploy the primary ssh authentication key
'''
ssh = ''
if 'ssh_auth' in vm_:
if not os.path.isfile(vm_['ssh_auth']):
return None
ssh = vm_['ssh_auth']
if not ssh:
if not os.path.isfile(__opts__['ssh_auth']):
return None
ssh = __opts__['ssh_auth']
return SSHKeyDeployment(open(os.path.expanduser(ssh)).read())
def script(vm_):
'''
Return the deployment object for managing a script
'''
os_ = ''
if 'os' in vm_:
os_ = vm_['os']
if not os_:
os_ = __opts__['os']
return ScriptDeployment(saltcloud.utils.os_script(os_))
def image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
if not 'image' in vm_:
return images[0]
if isinstance(vm_['image'], int):
return images[vm_['image']]
for img in images:
if img.id == vm_['image']:
return img
def size(conn, vm_):
'''
Return the vm's size object
'''
sizes = conn.list_sizes()
if not 'size' in vm_:
return sizes[0]
if isinstance(vm_['size'], int):
return sizes[vm_['size']]
for size in sizes:
if size.id == vm_['size']:
return size
if size.name == vm_['size']:
return size
def create(vm_):
'''
Create a single vm from a data dict
'''
connection = conn(vm_)
msd = MultiStepDeployment([ssh_pub(vm_), script(vm_)])
image = image(conn, vm_)
size = size(conn, vm_)
return conn.deploy_node(
name=vm_['name'],
image=image,
size=size,
deploy=msd)
|
<commit_before><commit_msg>Add initial testing module for cloud creation<commit_after>
|
'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
# Import python libs
import os
import tempfile
import shutil
#
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import salt libs
import saltcloud.utils
import salt.crypt
def conn(vm_):
'''
Return a conn object for the passed vm data
'''
prov = 'EC2'
if 'location' in vm_:
prov += '_{0}'.format(vm_['location'])
elif 'location' in __opts__:
if __opts__['location']:
prov += '_{0}'.format(__opts__['location'])
if not hasattr(Provider, prov):
return None
driver = get_driver('EC2')
return driver(
__opts__['EC2_user'],
__opts__['EC2_key'],
)
def ssh_pub(vm_):
'''
Deploy the primary ssh authentication key
'''
ssh = ''
if 'ssh_auth' in vm_:
if not os.path.isfile(vm_['ssh_auth']):
return None
ssh = vm_['ssh_auth']
if not ssh:
if not os.path.isfile(__opts__['ssh_auth']):
return None
ssh = __opts__['ssh_auth']
return SSHKeyDeployment(open(os.path.expanduser(ssh)).read())
def script(vm_):
'''
Return the deployment object for managing a script
'''
os_ = ''
if 'os' in vm_:
os_ = vm_['os']
if not os_:
os_ = __opts__['os']
return ScriptDeployment(saltcloud.utils.os_script(os_))
def image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
if not 'image' in vm_:
return images[0]
if isinstance(vm_['image'], int):
return images[vm_['image']]
for img in images:
if img.id == vm_['image']:
return img
def size(conn, vm_):
'''
Return the vm's size object
'''
sizes = conn.list_sizes()
if not 'size' in vm_:
return sizes[0]
if isinstance(vm_['size'], int):
return sizes[vm_['size']]
for size in sizes:
if size.id == vm_['size']:
return size
if size.name == vm_['size']:
return size
def create(vm_):
'''
Create a single vm from a data dict
'''
connection = conn(vm_)
msd = MultiStepDeployment([ssh_pub(vm_), script(vm_)])
image = image(conn, vm_)
size = size(conn, vm_)
return conn.deploy_node(
name=vm_['name'],
image=image,
size=size,
deploy=msd)
|
Add initial testing module for cloud creation'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
# Import python libs
import os
import tempfile
import shutil
#
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import salt libs
import saltcloud.utils
import salt.crypt
def conn(vm_):
'''
Return a conn object for the passed vm data
'''
prov = 'EC2'
if 'location' in vm_:
prov += '_{0}'.format(vm_['location'])
elif 'location' in __opts__:
if __opts__['location']:
prov += '_{0}'.format(__opts__['location'])
if not hasattr(Provider, prov):
return None
driver = get_driver('EC2')
return driver(
__opts__['EC2_user'],
__opts__['EC2_key'],
)
def ssh_pub(vm_):
'''
Deploy the primary ssh authentication key
'''
ssh = ''
if 'ssh_auth' in vm_:
if not os.path.isfile(vm_['ssh_auth']):
return None
ssh = vm_['ssh_auth']
if not ssh:
if not os.path.isfile(__opts__['ssh_auth']):
return None
ssh = __opts__['ssh_auth']
return SSHKeyDeployment(open(os.path.expanduser(ssh)).read())
def script(vm_):
'''
Return the deployment object for managing a script
'''
os_ = ''
if 'os' in vm_:
os_ = vm_['os']
if not os_:
os_ = __opts__['os']
return ScriptDeployment(saltcloud.utils.os_script(os_))
def image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
if not 'image' in vm_:
return images[0]
if isinstance(vm_['image'], int):
return images[vm_['image']]
for img in images:
if img.id == vm_['image']:
return img
def size(conn, vm_):
'''
Return the vm's size object
'''
sizes = conn.list_sizes()
if not 'size' in vm_:
return sizes[0]
if isinstance(vm_['size'], int):
return sizes[vm_['size']]
for size in sizes:
if size.id == vm_['size']:
return size
if size.name == vm_['size']:
return size
def create(vm_):
'''
Create a single vm from a data dict
'''
connection = conn(vm_)
msd = MultiStepDeployment([ssh_pub(vm_), script(vm_)])
image = image(conn, vm_)
size = size(conn, vm_)
return conn.deploy_node(
name=vm_['name'],
image=image,
size=size,
deploy=msd)
|
<commit_before><commit_msg>Add initial testing module for cloud creation<commit_after>'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
# Import python libs
import os
import tempfile
import shutil
#
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import salt libs
import saltcloud.utils
import salt.crypt
def conn(vm_):
'''
Return a conn object for the passed vm data
'''
prov = 'EC2'
if 'location' in vm_:
prov += '_{0}'.format(vm_['location'])
elif 'location' in __opts__:
if __opts__['location']:
prov += '_{0}'.format(__opts__['location'])
if not hasattr(Provider, prov):
return None
driver = get_driver('EC2')
return driver(
__opts__['EC2_user'],
__opts__['EC2_key'],
)
def ssh_pub(vm_):
'''
Deploy the primary ssh authentication key
'''
ssh = ''
if 'ssh_auth' in vm_:
if not os.path.isfile(vm_['ssh_auth']):
return None
ssh = vm_['ssh_auth']
if not ssh:
if not os.path.isfile(__opts__['ssh_auth']):
return None
ssh = __opts__['ssh_auth']
return SSHKeyDeployment(open(os.path.expanduser(ssh)).read())
def script(vm_):
'''
Return the deployment object for managing a script
'''
os_ = ''
if 'os' in vm_:
os_ = vm_['os']
if not os_:
os_ = __opts__['os']
return ScriptDeployment(saltcloud.utils.os_script(os_))
def image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
if not 'image' in vm_:
return images[0]
if isinstance(vm_['image'], int):
return images[vm_['image']]
for img in images:
if img.id == vm_['image']:
return img
def size(conn, vm_):
'''
Return the vm's size object
'''
sizes = conn.list_sizes()
if not 'size' in vm_:
return sizes[0]
if isinstance(vm_['size'], int):
return sizes[vm_['size']]
for size in sizes:
if size.id == vm_['size']:
return size
if size.name == vm_['size']:
return size
def create(vm_):
'''
Create a single vm from a data dict
'''
connection = conn(vm_)
msd = MultiStepDeployment([ssh_pub(vm_), script(vm_)])
image = image(conn, vm_)
size = size(conn, vm_)
return conn.deploy_node(
name=vm_['name'],
image=image,
size=size,
deploy=msd)
|
|
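In `create()` above, the locals `image` and `size` shadow the module-level helper functions before those helpers are called (an UnboundLocalError as written), and `deploy_node` is invoked on `conn` (the factory function) rather than on the `connection` object just created. A hedged sketch of the apparent intent, assuming the rest of the module as context:

def create(vm_):
    '''
    Create a single vm from a data dict
    '''
    connection = conn(vm_)
    msd = MultiStepDeployment([ssh_pub(vm_), script(vm_)])
    vm_image = image(connection, vm_)  # distinct names avoid the shadowing
    vm_size = size(connection, vm_)
    return connection.deploy_node(
        name=vm_['name'],
        image=vm_image,
        size=vm_size,
        deploy=msd)
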
28460efa5c8add33c36f45ac3bdefbd4c94ac0af
|
py/desisurvey/test/test_optimize.py
|
py/desisurvey/test/test_optimize.py
|
import unittest
import numpy as np
from desisurvey.optimize import *
class TestUtils(unittest.TestCase):
def test_wrap_unwrap(self):
x = np.linspace(0., 350., 97)
for dx in (-60, 0, 60):
w = wrap(x, dx)
assert np.all(w >= dx)
assert np.all(w < dx + 360)
|
Add simple unit test for optimize module
|
Add simple unit test for optimize module
|
Python
|
bsd-3-clause
|
desihub/desisurvey,desihub/desisurvey
|
Add simple unit test for optimize module
|
import unittest
import numpy as np
from desisurvey.optimize import *
class TestUtils(unittest.TestCase):
def test_wrap_unwrap(self):
x = np.linspace(0., 350., 97)
for dx in (-60, 0, 60):
w = wrap(x, dx)
assert np.all(w >= dx)
assert np.all(w < dx + 360)
|
<commit_before><commit_msg>Add simple unit test for optimize module<commit_after>
|
import unittest
import numpy as np
from desisurvey.optimize import *
class TestUtils(unittest.TestCase):
def test_wrap_unwrap(self):
x = np.linspace(0., 350., 97)
for dx in (-60, 0, 60):
w = wrap(x, dx)
assert np.all(w >= dx)
assert np.all(w < dx + 360)
|
Add simple unit test for optimize moduleimport unittest
import numpy as np
from desisurvey.optimize import *
class TestUtils(unittest.TestCase):
def test_wrap_unwrap(self):
x = np.linspace(0., 350., 97)
for dx in (-60, 0, 60):
w = wrap(x, dx)
assert np.all(w >= dx)
assert np.all(w < dx + 360)
|
<commit_before><commit_msg>Add simple unit test for optimize module<commit_after>import unittest
import numpy as np
from desisurvey.optimize import *
class TestUtils(unittest.TestCase):
def test_wrap_unwrap(self):
x = np.linspace(0., 350., 97)
for dx in (-60, 0, 60):
w = wrap(x, dx)
assert np.all(w >= dx)
assert np.all(w < dx + 360)
|
|
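For readers without the package handy, the assertions pin down `wrap` as mapping angles into the half-open interval [dx, dx + 360). A plausible implementation consistent with the test (an assumption; the real desisurvey.optimize.wrap may differ in details):

import numpy as np

def wrap(x, dx):
    """Map angles in degrees into [dx, dx + 360)."""
    return dx + np.mod(np.asarray(x) - dx, 360)

w = wrap(np.linspace(0., 350., 97), -60)
assert np.all(w >= -60) and np.all(w < 300)
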
f55d01d3b2b96af4394107404a452101c3aa7104
|
circle_transform.py
|
circle_transform.py
|
# License: Public Domain
# (C) 2014 Toshimitsu Kimura
import sys
import cv2
import numpy as np
import math
def main():
if len(sys.argv) != 3:
print "./circle_transform.py <in_file> <out_file>"
quit()
filename = sys.argv[1]
outfile = sys.argv[2]
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
print "input file is not found"
quit()
circle = np.zeros([image.shape[0], image.shape[1], 2], dtype=np.float)
center = [image.shape[0]/2.0, image.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
pos = [i-center[0], j-center[1]]
if pos[0] == 0: continue
n = pos[1]/pos[0] # y = nx, x = y/n
if n == 0: continue
circle[i,j,0] = 1.0 - math.sqrt(pos[0]**2 + pos[1]**2) / math.sqrt(min(center[0]**2 + (n*center[0])**2, center[1]**2 + (center[1]/n)**2))
circle[i,j,1] = math.atan2(pos[1], pos[0])
img_out_size = max(image.shape[0], image.shape[1]) / 2.0
img_out = np.zeros([img_out_size, img_out_size, image.shape[2]], dtype=np.uint8)
out_half = [img_out.shape[0]/2.0, img_out.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
dist = circle[i,j,0]
rad = circle[i,j,1]
idx_x = (math.cos(rad)*dist*out_half[0] + out_half[0])%img_out.shape[0]
idx_y = (math.sin(rad)*dist*out_half[1] + out_half[1])%img_out.shape[1]
img_out[idx_x, idx_y] = image[i, j]
cv2.imwrite(outfile, img_out)
main()
|
Add circle transform script that may be useful for OCR
|
Add circle transform script that may be useful for OCR
|
Python
|
lgpl-2.1
|
nazodane/binarization_for_ocr
|
Add circle transform script that may be useful for OCR
|
# License: Public Domain
# (C) 2014 Toshimitsu Kimura
import sys
import cv2
import numpy as np
import math
def main():
if len(sys.argv) != 3:
print "./circle_transform.py <in_file> <out_file>"
quit()
filename = sys.argv[1]
outfile = sys.argv[2]
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
print "input file is not found"
quit()
circle = np.zeros([image.shape[0], image.shape[1], 2], dtype=np.float)
center = [image.shape[0]/2.0, image.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
pos = [i-center[0], j-center[1]]
if pos[0] == 0: continue
n = pos[1]/pos[0] # y = nx, x = y/n
if n == 0: continue
circle[i,j,0] = 1.0 - math.sqrt(pos[0]**2 + pos[1]**2) / math.sqrt(min(center[0]**2 + (n*center[0])**2, center[1]**2 + (center[1]/n)**2))
circle[i,j,1] = math.atan2(pos[1], pos[0])
img_out_size = max(image.shape[0], image.shape[1]) / 2.0
img_out = np.zeros([img_out_size, img_out_size, image.shape[2]], dtype=np.uint8)
out_half = [img_out.shape[0]/2.0, img_out.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
dist = circle[i,j,0]
rad = circle[i,j,1]
idx_x = (math.cos(rad)*dist*out_half[0] + out_half[0])%img_out.shape[0]
idx_y = (math.sin(rad)*dist*out_half[1] + out_half[1])%img_out.shape[1]
img_out[idx_x, idx_y] = image[i, j]
cv2.imwrite(outfile, img_out)
main()
|
<commit_before><commit_msg>Add circle transform script that may be useful for OCR<commit_after>
|
# License: Public Domain
# (C) 2014 Toshimitsu Kimura
import sys
import cv2
import numpy as np
import math
def main():
if len(sys.argv) != 3:
print "./circle_transform.py <in_file> <out_file>"
quit()
filename = sys.argv[1]
outfile = sys.argv[2]
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
print "input file is not found"
quit()
circle = np.zeros([image.shape[0], image.shape[1], 2], dtype=np.float)
center = [image.shape[0]/2.0, image.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
pos = [i-center[0], j-center[1]]
if pos[0] == 0: continue
n = pos[1]/pos[0] # y = nx, x = y/n
if n == 0: continue
circle[i,j,0] = 1.0 - math.sqrt(pos[0]**2 + pos[1]**2) / math.sqrt(min(center[0]**2 + (n*center[0])**2, center[1]**2 + (center[1]/n)**2))
circle[i,j,1] = math.atan2(pos[1], pos[0])
img_out_size = max(image.shape[0], image.shape[1]) / 2.0
img_out = np.zeros([img_out_size, img_out_size, image.shape[2]], dtype=np.uint8)
out_half = [img_out.shape[0]/2.0, img_out.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
dist = circle[i,j,0]
rad = circle[i,j,1]
idx_x = (math.cos(rad)*dist*out_half[0] + out_half[0])%img_out.shape[0]
idx_y = (math.sin(rad)*dist*out_half[1] + out_half[1])%img_out.shape[1]
img_out[idx_x, idx_y] = image[i, j]
cv2.imwrite(outfile, img_out)
main()
|
Add circle transform script that may be useful for OCR# License: Public Domain
# (C) 2014 Toshimitsu Kimura
import sys
import cv2
import numpy as np
import math
def main():
if len(sys.argv) != 3:
print "./circle_transform.py <in_file> <out_file>"
quit()
filename = sys.argv[1]
outfile = sys.argv[2]
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
print "input file is not found"
quit()
circle = np.zeros([image.shape[0], image.shape[1], 2], dtype=np.float)
center = [image.shape[0]/2.0, image.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
pos = [i-center[0], j-center[1]]
if pos[0] == 0: continue
n = pos[1]/pos[0] # y = nx, x = y/n
if n == 0: continue
circle[i,j,0] = 1.0 - math.sqrt(pos[0]**2 + pos[1]**2) / math.sqrt(min(center[0]**2 + (n*center[0])**2, center[1]**2 + (center[1]/n)**2))
circle[i,j,1] = math.atan2(pos[1], pos[0])
img_out_size = max(image.shape[0], image.shape[1]) / 2.0
img_out = np.zeros([img_out_size, img_out_size, image.shape[2]], dtype=np.uint8)
out_half = [img_out.shape[0]/2.0, img_out.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
dist = circle[i,j,0]
rad = circle[i,j,1]
idx_x = (math.cos(rad)*dist*out_half[0] + out_half[0])%img_out.shape[0]
idx_y = (math.sin(rad)*dist*out_half[1] + out_half[1])%img_out.shape[1]
img_out[idx_x, idx_y] = image[i, j]
cv2.imwrite(outfile, img_out)
main()
|
<commit_before><commit_msg>Add circle transform script that may be useful for OCR<commit_after># License: Public Domain
# (C) 2014 Toshimitsu Kimura
import sys
import cv2
import numpy as np
import math
def main():
if len(sys.argv) != 3:
print "./circle_transform.py <in_file> <out_file>"
quit()
filename = sys.argv[1]
outfile = sys.argv[2]
image = cv2.imread(filename, cv2.IMREAD_COLOR)
if image is None:
print "input file is not found"
quit()
circle = np.zeros([image.shape[0], image.shape[1], 2], dtype=np.float)
center = [image.shape[0]/2.0, image.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
pos = [i-center[0], j-center[1]]
if pos[0] == 0: continue
n = pos[1]/pos[0] # y = nx, x = y/n
if n == 0: continue
circle[i,j,0] = 1.0 - math.sqrt(pos[0]**2 + pos[1]**2) / math.sqrt(min(center[0]**2 + (n*center[0])**2, center[1]**2 + (center[1]/n)**2))
circle[i,j,1] = math.atan2(pos[1], pos[0])
img_out_size = max(image.shape[0], image.shape[1]) / 2.0
img_out = np.zeros([img_out_size, img_out_size, image.shape[2]], dtype=np.uint8)
out_half = [img_out.shape[0]/2.0, img_out.shape[1]/2.0]
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
dist = circle[i,j,0]
rad = circle[i,j,1]
idx_x = (math.cos(rad)*dist*out_half[0] + out_half[0])%img_out.shape[0]
idx_y = (math.sin(rad)*dist*out_half[1] + out_half[1])%img_out.shape[1]
img_out[idx_x, idx_y] = image[i, j]
cv2.imwrite(outfile, img_out)
main()
|
|
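This script is Python 2 (`print` statements, `main()` at module scope), and two lines would need attention on Python 3 with current NumPy: array shapes and element indices must be integers, while here `img_out_size`, `idx_x`, and `idx_y` are floats. A small, runnable illustration of the two fixes (shapes and values below are illustrative only):

import numpy as np

img_out_size = int(max(480, 640) / 2.0)  # shapes must be ints on modern NumPy
img_out = np.zeros([img_out_size, img_out_size, 3], dtype=np.uint8)
idx_x, idx_y = 12.7, 3.2
img_out[int(idx_x), int(idx_y)] = (255, 255, 255)  # indices must be ints too
print(img_out.shape, img_out[12, 3])
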
794b5297c96529d895705214b1fb2ff9ba1510a9
|
into/backends/aws.py
|
into/backends/aws.py
|
from __future__ import print_function, division, absolute_import
from into import discover, CSV, resource, convert
import pandas as pd
from ..utils import cls_name
from toolz import memoize
from datashape import var
class S3(object):
"""An object that holds a resource that lives in an S3 bucket
Examples
--------
>>> bucket = S3(CSV('s3://nyqpug/tips.csv'))
>>> dshape = discover(bucket)
>>> dshape.measure['total_bill']
ctype("float64")
Notes
-----
* pandas read_csv can read CSV files directly from S3 buckets
* We should check to make sure that we are discovering from CSVs as fast as
possible, since using S3 requires hitting the network.
* For more complicated formats we'll have to write some middleware that
turns S3 blobs into a format suitable to be converted into other formats
such as JSON, HDF5, etc. Some of this will be handled by the into graph.
"""
def __init__(self, data):
self.data = self.container(data)
@memoize
def s3(cls):
"""Parametrized S3 bucket Class
Notes
-----
* Shamelessly copied from ``into.chunks``
"""
return type('S3_' + cls_name(cls).replace('.', '_'), (S3,),
{'container': cls})
@resource.register('s3://.*\.csv', priority=18)
def resource_s3_csv(uri):
return s3(CSV)(uri)
@convert.register(CSV, s3(CSV))
def convert_s3_to_csv(bucket, **kwargs):
return CSV(bucket.data.path, **kwargs)
@discover.register(s3(CSV))
def discover_s3_csv(c, **kwargs):
return var * discover(c.data).subshape[0]
@convert.register(pd.DataFrame, s3(CSV))
def convert_csv_to_s3(csv):
return convert(pd.DataFrame, csv.data)
|
Implement part of the s3 csv into interface
|
Implement part of the s3 csv into interface
|
Python
|
bsd-3-clause
|
ywang007/odo,alexmojaki/odo,blaze/odo,blaze/odo,ContinuumIO/odo,cowlicks/odo,alexmojaki/odo,Dannnno/odo,cpcloud/odo,quantopian/odo,cpcloud/odo,cowlicks/odo,ContinuumIO/odo,Dannnno/odo,ywang007/odo,quantopian/odo
|
Implement part of the s3 csv into interface
|
from __future__ import print_function, division, absolute_import
from into import discover, CSV, resource, convert
import pandas as pd
from ..utils import cls_name
from toolz import memoize
from datashape import var
class S3(object):
"""An object that holds a resource that lives in an S3 bucket
Examples
--------
>>> bucket = S3(CSV('s3://nyqpug/tips.csv'))
>>> dshape = discover(bucket)
>>> dshape.measure['total_bill']
ctype("float64")
Notes
-----
* pandas read_csv can read CSV files directly from S3 buckets
* We should check to make sure that we are discovering from CSVs as fast as
possible, since using S3 requires hitting the network.
* For more complicated formats we'll have to write some middleware that
turns S3 blobs into a format suitable to be converted into other formats
such as JSON, HDF5, etc. Some of this will be handled by the into graph.
"""
def __init__(self, data):
self.data = self.container(data)
@memoize
def s3(cls):
"""Parametrized S3 bucket Class
Notes
-----
* Shamelessly copied from ``into.chunks``
"""
return type('S3_' + cls_name(cls).replace('.', '_'), (S3,),
{'container': cls})
@resource.register('s3://.*\.csv', priority=18)
def resource_s3_csv(uri):
return s3(CSV)(uri)
@convert.register(CSV, s3(CSV))
def convert_s3_to_csv(bucket, **kwargs):
return CSV(bucket.data.path, **kwargs)
@discover.register(s3(CSV))
def discover_s3_csv(c, **kwargs):
return var * discover(c.data).subshape[0]
@convert.register(pd.DataFrame, s3(CSV))
def convert_csv_to_s3(csv):
return convert(pd.DataFrame, csv.data)
|
<commit_before><commit_msg>Implement part of the s3 csv into interface<commit_after>
|
from __future__ import print_function, division, absolute_import
from into import discover, CSV, resource, convert
import pandas as pd
from ..utils import cls_name
from toolz import memoize
from datashape import var
class S3(object):
"""An object that holds a resource that lives in an S3 bucket
Examples
--------
>>> bucket = S3(CSV('s3://nyqpug/tips.csv'))
>>> dshape = discover(bucket)
>>> dshape.measure['total_bill']
ctype("float64")
Notes
-----
* pandas read_csv can read CSV files directly from S3 buckets
* We should check to make sure that we are discovering from CSVs as fast as
possible, since using S3 requires hitting the network.
* For more complicated formats we'll have to write some middleware that
turns S3 blobs into a format suitable to be converted into other formats
such as JSON, HDF5, etc. Some of this will be handled by the into graph.
"""
def __init__(self, data):
self.data = self.container(data)
@memoize
def s3(cls):
"""Parametrized S3 bucket Class
Notes
-----
* Shamelessly copied from ``into.chunks``
"""
return type('S3_' + cls_name(cls).replace('.', '_'), (S3,),
{'container': cls})
@resource.register('s3://.*\.csv', priority=18)
def resource_s3_csv(uri):
return s3(CSV)(uri)
@convert.register(CSV, s3(CSV))
def convert_s3_to_csv(bucket, **kwargs):
return CSV(bucket.data.path, **kwargs)
@discover.register(s3(CSV))
def discover_s3_csv(c, **kwargs):
return var * discover(c.data).subshape[0]
@convert.register(pd.DataFrame, s3(CSV))
def convert_csv_to_s3(csv):
return convert(pd.DataFrame, csv.data)
|
Implement part of the s3 csv into interfacefrom __future__ import print_function, division, absolute_import
from into import discover, CSV, resource, convert
import pandas as pd
from ..utils import cls_name
from toolz import memoize
from datashape import var
class S3(object):
"""An object that holds a resource that lives in an S3 bucket
Examples
--------
>>> bucket = S3(CSV('s3://nyqpug/tips.csv'))
>>> dshape = discover(bucket)
>>> dshape.measure['total_bill']
ctype("float64")
Notes
-----
* pandas read_csv can read CSV files directly from S3 buckets
* We should check to make sure that we are discovering from CSVs as fast as
possible, since using S3 requires hitting the network.
* For more complicated formats we'll have to write some middleware that
turns S3 blobs into a format suitable to be converted into other formats
such as JSON, HDF5, etc. Some of this will be handled by the into graph.
"""
def __init__(self, data):
self.data = self.container(data)
@memoize
def s3(cls):
"""Parametrized S3 bucket Class
Notes
-----
* Shamelessly copied from ``into.chunks``
"""
return type('S3_' + cls_name(cls).replace('.', '_'), (S3,),
{'container': cls})
@resource.register('s3://.*\.csv', priority=18)
def resource_s3_csv(uri):
return s3(CSV)(uri)
@convert.register(CSV, s3(CSV))
def convert_s3_to_csv(bucket, **kwargs):
return CSV(bucket.data.path, **kwargs)
@discover.register(s3(CSV))
def discover_s3_csv(c, **kwargs):
return var * discover(c.data).subshape[0]
@convert.register(pd.DataFrame, s3(CSV))
def convert_csv_to_s3(csv):
return convert(pd.DataFrame, csv.data)
|
<commit_before><commit_msg>Implement part of the s3 csv into interface<commit_after>from __future__ import print_function, division, absolute_import
from into import discover, CSV, resource, convert
import pandas as pd
from ..utils import cls_name
from toolz import memoize
from datashape import var
class S3(object):
"""An object that holds a resource that lives in an S3 bucket
Examples
--------
>>> bucket = S3(CSV('s3://nyqpug/tips.csv'))
>>> dshape = discover(bucket)
>>> dshape.measure['total_bill']
ctype("float64")
Notes
-----
* pandas read_csv can read CSV files directly from S3 buckets
* We should check to make sure that we are discovering from CSVs as fast as
possible, since using S3 requires hitting the network.
* For more complicated formats we'll have to write some middleware that
turns S3 blobs into a format suitable to be converted into other formats
such as JSON, HDF5, etc. Some of this will be handled by the into graph.
"""
def __init__(self, data):
self.data = self.container(data)
@memoize
def s3(cls):
"""Parametrized S3 bucket Class
Notes
-----
* Shamelessly copied from ``into.chunks``
"""
return type('S3_' + cls_name(cls).replace('.', '_'), (S3,),
{'container': cls})
@resource.register('s3://.*\.csv', priority=18)
def resource_s3_csv(uri):
return s3(CSV)(uri)
@convert.register(CSV, s3(CSV))
def convert_s3_to_csv(bucket, **kwargs):
return CSV(bucket.data.path, **kwargs)
@discover.register(s3(CSV))
def discover_s3_csv(c, **kwargs):
return var * discover(c.data).subshape[0]
@convert.register(pd.DataFrame, s3(CSV))
def convert_csv_to_s3(csv):
return convert(pd.DataFrame, csv.data)
|
|
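The `s3(cls)` helper above builds one wrapper subclass per wrapped type and memoizes it, so dispatch can key on the generated class. The same pattern reduced to standard-library pieces (functools.lru_cache standing in for toolz.memoize; all names below are illustrative):

from functools import lru_cache

class Wrapper(object):
    def __init__(self, data):
        self.data = self.container(data)

@lru_cache(maxsize=None)
def wrapper(cls):
    # One cached subclass per wrapped type: wrapper(list) is wrapper(list).
    return type('Wrapper_' + cls.__name__, (Wrapper,), {'container': cls})

w = wrapper(list)('abc')
print(type(w).__name__, w.data)  # -> Wrapper_list ['a', 'b', 'c']
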
b7212b9aa393faea3cfaf94c3dfaf17487461d62
|
lib/pegasus/python/Pegasus/test/service/test_service.py
|
lib/pegasus/python/Pegasus/test/service/test_service.py
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import unittest
class PegasusServiceTestCase(unittest.TestCase):
def test_imports(self):
from Pegasus.service.server import main
|
Add unit test to see if pegasus-service imports are successful
|
Add unit test to see if pegasus-service imports are successful
|
Python
|
apache-2.0
|
pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus,pegasus-isi/pegasus
|
Add unit test to see if pegasus-service imports are successful
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import unittest
class PegasusServiceTestCase(unittest.TestCase):
def test_imports(self):
from Pegasus.service.server import main
|
<commit_before><commit_msg>Add unit test to see if pegasus-service imports are successful<commit_after>
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import unittest
class PegasusServiceTestCase(unittest.TestCase):
def test_imports(self):
from Pegasus.service.server import main
|
Add unit test to see if pegasus-service imports are successful# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import unittest
class PegasusServiceTestCase(unittest.TestCase):
def test_imports(self):
from Pegasus.service.server import main
|
<commit_before><commit_msg>Add unit test to see if pegasus-service imports are successful<commit_after># Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import unittest
class PegasusServiceTestCase(unittest.TestCase):
def test_imports(self):
from Pegasus.service.server import main
|
|
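The test above is an import smoke test: it passes as long as the module imports cleanly. The same idea generalizes with importlib when several entry points need checking (the module names below are stand-ins, not Pegasus modules):

import importlib
import unittest

MODULES = ['json', 'csv']  # stand-ins for the modules under test

class ImportSmokeTest(unittest.TestCase):
    def test_imports(self):
        for name in MODULES:
            importlib.import_module(name)  # ImportError fails the test

if __name__ == '__main__':
    unittest.main()
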
ee0f949da82f249684c63cefe49ee546aca76299
|
programs/utils/fix_json.py
|
programs/utils/fix_json.py
|
#!/usr/bin/python
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
import sys
if len(sys.argv) == 3:
infile = sys.argv[1]
outfile = sys.argv[2]
else:
infile = raw_input("Enter input file:")
outfile = raw_input("Enter output file:")
with open(infile, "r") as gen:
with open(outfile, "w") as out:
for line in gen:
if isfloat(line):
out.write(" " * 12 + str(int(float(line) * 1000000)) + "\n")
else:
out.write(line)
|
Add simple script that changes floating points in genesis.json to ints
|
Add simple script that changes floating points in genesis.json to ints
|
Python
|
unlicense
|
camponez/bitshares,jakeporter/Bitshares,jakeporter/Bitshares,frrp/bitshares,bitsuperlab/cpp-play,FollowMyVote/bitshares,frrp/bitshares,bitshares/devshares,frrp/bitshares,bitshares/bitshares,bitsuperlab/cpp-play,FollowMyVote/bitshares,bitsuperlab/cpp-play,Ziftr/bitshares,camponez/bitshares,bitshares/bitshares,bitshares/bitshares-0.x,drltc/keyid,camponez/bitshares,bitshares/bitshares-0.x,RemitaBit/Remitabit,bitshares/bitshares,RemitaBit/Remitabit,dacsunlimited/dac_play,Ziftr/bitshares,RemitaBit/Remitabit,dacsunlimited/dac_play,jakeporter/Bitshares,RemitaBit/Remitabit,bitshares/devshares,bitshares/bitshares-0.x,FollowMyVote/bitshares,bitshares/devshares,camponez/bitshares,dacsunlimited/dac_play,bitshares/bitshares-0.x,bitshares/bitshares,FollowMyVote/bitshares,dacsunlimited/dac_play,bitshares/bitshares,jakeporter/Bitshares,bitshares/devshares,jakeporter/Bitshares,bitshares/devshares,FollowMyVote/bitshares,frrp/bitshares,FollowMyVote/bitshares,dacsunlimited/dac_play,drltc/keyid,drltc/keyid,RemitaBit/Remitabit,bitshares/bitshares-0.x,bitsuperlab/cpp-play,dacsunlimited/dac_play,drltc/keyid,bitshares/devshares,camponez/bitshares,bitshares/bitshares,Ziftr/bitshares,bitshares/bitshares-0.x,frrp/bitshares,bitsuperlab/cpp-play,RemitaBit/Remitabit,frrp/bitshares,bitsuperlab/cpp-play,jakeporter/Bitshares,Ziftr/bitshares,camponez/bitshares
|
Add simple script that changes floating points in genesis.json to ints
|
#!/usr/bin/python
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
import sys
if len(sys.argv) == 3:
infile = sys.argv[1]
outfile = sys.argv[2]
else:
infile = raw_input("Enter input file:")
outfile = raw_input("Enter output file:")
with open(infile, "r") as gen:
with open(outfile, "w") as out:
for line in gen:
if isfloat(line):
out.write(" " * 12 + str(int(float(line) * 1000000)) + "\n")
else:
out.write(line)
|
<commit_before><commit_msg>Add simple script that changes floating points in genesis.json to ints<commit_after>
|
#!/usr/bin/python
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
import sys
if len(sys.argv) == 3:
infile = sys.argv[1]
outfile = sys.argv[2]
else:
infile = raw_input("Enter input file:")
outfile = raw_input("Enter output file:")
with open(infile, "r") as gen:
with open(outfile, "w") as out:
for line in gen:
if isfloat(line):
out.write(" " * 12 + str(int(float(line) * 1000000)) + "\n")
else:
out.write(line)
|
Add simple script that changes floating points in genesis.json to ints#!/usr/bin/python
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
import sys
if len(sys.argv) == 3:
infile = sys.argv[1]
outfile = sys.argv[2]
else:
infile = raw_input("Enter input file:")
outfile = raw_input("Enter output file:")
with open(infile, "r") as gen:
with open(outfile, "w") as out:
for line in gen:
if isfloat(line):
out.write(" " * 12 + str(int(float(line) * 1000000)) + "\n")
else:
out.write(line)
|
<commit_before><commit_msg>Add simple script that changes floating points in genesis.json to ints<commit_after>#!/usr/bin/python
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
import sys
if len(sys.argv) == 3:
infile = sys.argv[1]
outfile = sys.argv[2]
else:
infile = raw_input("Enter input file:")
outfile = raw_input("Enter output file:")
with open(infile, "r") as gen:
with open(outfile, "w") as out:
for line in gen:
if isfloat(line):
out.write(" " * 12 + str(int(float(line) * 1000000)) + "\n")
else:
out.write(line)
|
|
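One subtlety in the script above: `isfloat("3")` is also True, so lines that already hold integers would be rescaled by 1e6 along with the floats. If only fractional values should be converted (an assumption about intent, not stated in the commit), the check could require a decimal point:

def has_fraction(value):
    try:
        float(value)
    except ValueError:
        return False
    return '.' in value

print(has_fraction("0.5"), has_fraction("3"))  # -> True False
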
436840ee3d41ca06c6a94745144ed3b8453816d8
|
geotrek/common/management/commands/clean_attachments.py
|
geotrek/common/management/commands/clean_attachments.py
|
from pathlib import Path
from django.core.management.base import BaseCommand
from django.conf import settings
from geotrek.common.models import Attachment
from easy_thumbnails.models import Thumbnail
class Command(BaseCommand):
help = "Remove files for deleted attachments"
def handle(self, *args, **options):
paperclip_dir = Path(settings.MEDIA_ROOT) / 'paperclip'
attachments = set(Attachment.objects.values_list('attachment_file', flat=True))
thumbnails = set(Thumbnail.objects.values_list('name', flat=True))
if options['verbosity'] >= 1:
self.stdout.write(u"Attachments: {} / Thumbnails: {}".format(len(attachments), len(thumbnails)))
total = 0
deleted = 0
for path in paperclip_dir.glob('**/*'):
if not path.is_file():
continue
total += 1
relative = str(path.relative_to(settings.MEDIA_ROOT))
if relative in attachments:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Found".format(relative))
continue
if relative in thumbnails:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Thumbnail".format(relative))
continue
deleted += 1
path.unlink()
if options['verbosity'] >= 1:
self.stdout.write(u"{}... DELETED".format(relative))
if options['verbosity'] >= 1:
self.stdout.write(u"Files: {} / Deleted: {}".format(total, deleted))
|
Add a command to clean attachments
|
Add a command to clean attachments
|
Python
|
bsd-2-clause
|
GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin
|
Add a command to clean attachments
|
from pathlib import Path
from django.core.management.base import BaseCommand
from django.conf import settings
from geotrek.common.models import Attachment
from easy_thumbnails.models import Thumbnail
class Command(BaseCommand):
help = "Remove files for deleted attachments"
def handle(self, *args, **options):
paperclip_dir = Path(settings.MEDIA_ROOT) / 'paperclip'
attachments = set(Attachment.objects.values_list('attachment_file', flat=True))
thumbnails = set(Thumbnail.objects.values_list('name', flat=True))
if options['verbosity'] >= 1:
self.stdout.write(u"Attachments: {} / Thumbnails: {}".format(len(attachments), len(thumbnails)))
total = 0
deleted = 0
for path in paperclip_dir.glob('**/*'):
if not path.is_file():
continue
total += 1
relative = str(path.relative_to(settings.MEDIA_ROOT))
if relative in attachments:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Found".format(relative))
continue
if relative in thumbnails:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Thumbnail".format(relative))
continue
deleted += 1
path.unlink()
if options['verbosity'] >= 1:
self.stdout.write(u"{}... DELETED".format(relative))
if options['verbosity'] >= 1:
self.stdout.write(u"Files: {} / Deleted: {}".format(total, deleted))
|
<commit_before><commit_msg>Add a command to clean attachments<commit_after>
|
from pathlib import Path
from django.core.management.base import BaseCommand
from django.conf import settings
from geotrek.common.models import Attachment
from easy_thumbnails.models import Thumbnail
class Command(BaseCommand):
help = "Remove files for deleted attachments"
def handle(self, *args, **options):
paperclip_dir = Path(settings.MEDIA_ROOT) / 'paperclip'
attachments = set(Attachment.objects.values_list('attachment_file', flat=True))
thumbnails = set(Thumbnail.objects.values_list('name', flat=True))
if options['verbosity'] >= 1:
self.stdout.write(u"Attachments: {} / Thumbnails: {}".format(len(attachments), len(thumbnails)))
total = 0
deleted = 0
for path in paperclip_dir.glob('**/*'):
if not path.is_file():
continue
total += 1
relative = str(path.relative_to(settings.MEDIA_ROOT))
if relative in attachments:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Found".format(relative))
continue
if relative in thumbnails:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Thumbnail".format(relative))
continue
deleted += 1
path.unlink()
if options['verbosity'] >= 1:
self.stdout.write(u"{}... DELETED".format(relative))
if options['verbosity'] >= 1:
self.stdout.write(u"Files: {} / Deleted: {}".format(total, deleted))
|
Add a command to clean attachmentsfrom pathlib import Path
from django.core.management.base import BaseCommand
from django.conf import settings
from geotrek.common.models import Attachment
from easy_thumbnails.models import Thumbnail
class Command(BaseCommand):
help = "Remove files for deleted attachments"
def handle(self, *args, **options):
paperclip_dir = Path(settings.MEDIA_ROOT) / 'paperclip'
attachments = set(Attachment.objects.values_list('attachment_file', flat=True))
thumbnails = set(Thumbnail.objects.values_list('name', flat=True))
if options['verbosity'] >= 1:
self.stdout.write(u"Attachments: {} / Thumbnails: {}".format(len(attachments), len(thumbnails)))
total = 0
deleted = 0
for path in paperclip_dir.glob('**/*'):
if not path.is_file():
continue
total += 1
relative = str(path.relative_to(settings.MEDIA_ROOT))
if relative in attachments:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Found".format(relative))
continue
if relative in thumbnails:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Thumbnail".format(relative))
continue
deleted += 1
path.unlink()
if options['verbosity'] >= 1:
self.stdout.write(u"{}... DELETED".format(relative))
if options['verbosity'] >= 1:
self.stdout.write(u"Files: {} / Deleted: {}".format(total, deleted))
|
<commit_before><commit_msg>Add a command to clean attachments<commit_after>from pathlib import Path
from django.core.management.base import BaseCommand
from django.conf import settings
from geotrek.common.models import Attachment
from easy_thumbnails.models import Thumbnail
class Command(BaseCommand):
help = "Remove files for deleted attachments"
def handle(self, *args, **options):
paperclip_dir = Path(settings.MEDIA_ROOT) / 'paperclip'
attachments = set(Attachment.objects.values_list('attachment_file', flat=True))
thumbnails = set(Thumbnail.objects.values_list('name', flat=True))
if options['verbosity'] >= 1:
self.stdout.write(u"Attachments: {} / Thumbnails: {}".format(len(attachments), len(thumbnails)))
total = 0
deleted = 0
for path in paperclip_dir.glob('**/*'):
if not path.is_file():
continue
total += 1
relative = str(path.relative_to(settings.MEDIA_ROOT))
if relative in attachments:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Found".format(relative))
continue
if relative in thumbnails:
if options['verbosity'] >= 2:
self.stdout.write(u"{}... Thumbnail".format(relative))
continue
deleted += 1
path.unlink()
if options['verbosity'] >= 1:
self.stdout.write(u"{}... DELETED".format(relative))
if options['verbosity'] >= 1:
self.stdout.write(u"Files: {} / Deleted: {}".format(total, deleted))
|
|
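The command boils down to a set difference between files on disk and paths referenced in the database. The core idea in isolation, with plain pathlib and literal stand-ins for the Attachment/Thumbnail querysets (paths below are illustrative):

from pathlib import Path

media_root = Path('/tmp/media')
referenced = {'paperclip/a.jpg', 'paperclip/b.pdf'}  # stand-in for DB rows
for path in (media_root / 'paperclip').glob('**/*'):
    if path.is_file() and str(path.relative_to(media_root)) not in referenced:
        path.unlink()  # file on disk with no matching database row
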
4e1f68f1c033dc76930b2d651fe256dff11970e1
|
examples/scripts/weatherValuesNamed.py
|
examples/scripts/weatherValuesNamed.py
|
from medea import Tokenizer
from examples.scripts import timeit
def run():
source = open('examples/data/weathermap.json')
tokenizer = Tokenizer(source)
for tok, val in tokenizer.tokenizeValuesNamed("rain"):
print(tok, val)
timeit(run)
|
Bring in timeit, and implicitly overclock with freq
|
Bring in timeit, and implicitly overclock with freq
|
Python
|
agpl-3.0
|
ShrimpingIt/medea,ShrimpingIt/medea
|
Bring in timeit, and implicitly overclock with freq
|
from medea import Tokenizer
from examples.scripts import timeit
def run():
source = open('examples/data/weathermap.json')
tokenizer = Tokenizer(source)
for tok, val in tokenizer.tokenizeValuesNamed("rain"):
print(tok, val)
timeit(run)
|
<commit_before><commit_msg>Bring in timeit, and implicitly overclock with freq<commit_after>
|
from medea import Tokenizer
from examples.scripts import timeit
def run():
source = open('examples/data/weathermap.json')
tokenizer = Tokenizer(source)
for tok, val in tokenizer.tokenizeValuesNamed("rain"):
print(tok, val)
timeit(run)
|
Bring in timeit, and implicitly overclock with freqfrom medea import Tokenizer
from examples.scripts import timeit
def run():
source = open('examples/data/weathermap.json')
tokenizer = Tokenizer(source)
for tok, val in tokenizer.tokenizeValuesNamed("rain"):
print(tok, val)
timeit(run)
|
<commit_before><commit_msg>Bring in timeit, and implicitly overclock with freq<commit_after>from medea import Tokenizer
from examples.scripts import timeit
def run():
source = open('examples/data/weathermap.json')
tokenizer = Tokenizer(source)
for tok, val in tokenizer.tokenizeValuesNamed("rain"):
print(tok, val)
timeit(run)
|
|
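The commit message says `timeit` now "implicitly overclocks with freq", which on MicroPython boards suggests the helper raises machine.freq() before timing. A plausible CPython-side shape for such a helper (an assumption; the real examples.scripts.timeit may differ):

import time

def timeit(fn):
    # On MicroPython this is presumably where machine.freq(...) would be set.
    start = time.time()
    fn()
    print('elapsed: {:.3f}s'.format(time.time() - start))

timeit(lambda: sum(range(100000)))
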
8d4255538203336e6f4d72845723c179c38547e0
|
datastore/migrations/0028_auto_20160713_0000.py
|
datastore/migrations/0028_auto_20160713_0000.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-13 00:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('datastore', '0027_auto_20160712_2251'),
]
operations = [
migrations.RemoveField(
model_name='dailyusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummaryreporting',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='interpretationsummary',
name='project_block',
),
migrations.RemoveField(
model_name='monthlyaverageusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyaverageusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummaryreporting',
name='interpretation_summary',
),
migrations.DeleteModel(
name='DailyUsageBaseline',
),
migrations.DeleteModel(
name='DailyUsageReporting',
),
migrations.DeleteModel(
name='DailyUsageSummaryActual',
),
migrations.DeleteModel(
name='DailyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='DailyUsageSummaryReporting',
),
migrations.DeleteModel(
name='InterpretationSummary',
),
migrations.DeleteModel(
name='MonthlyAverageUsageBaseline',
),
migrations.DeleteModel(
name='MonthlyAverageUsageReporting',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryActual',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryReporting',
),
]
|
Add migration for deleted gunk
|
Add migration for deleted gunk
|
Python
|
mit
|
impactlab/oeem-energy-datastore,impactlab/oeem-energy-datastore,impactlab/oeem-energy-datastore
|
Add migration for deleted gunk
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-13 00:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('datastore', '0027_auto_20160712_2251'),
]
operations = [
migrations.RemoveField(
model_name='dailyusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummaryreporting',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='interpretationsummary',
name='project_block',
),
migrations.RemoveField(
model_name='monthlyaverageusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyaverageusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummaryreporting',
name='interpretation_summary',
),
migrations.DeleteModel(
name='DailyUsageBaseline',
),
migrations.DeleteModel(
name='DailyUsageReporting',
),
migrations.DeleteModel(
name='DailyUsageSummaryActual',
),
migrations.DeleteModel(
name='DailyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='DailyUsageSummaryReporting',
),
migrations.DeleteModel(
name='InterpretationSummary',
),
migrations.DeleteModel(
name='MonthlyAverageUsageBaseline',
),
migrations.DeleteModel(
name='MonthlyAverageUsageReporting',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryActual',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryReporting',
),
]
|
<commit_before><commit_msg>Add migration for deleted gunk<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-13 00:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('datastore', '0027_auto_20160712_2251'),
]
operations = [
migrations.RemoveField(
model_name='dailyusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummaryreporting',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='interpretationsummary',
name='project_block',
),
migrations.RemoveField(
model_name='monthlyaverageusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyaverageusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummaryreporting',
name='interpretation_summary',
),
migrations.DeleteModel(
name='DailyUsageBaseline',
),
migrations.DeleteModel(
name='DailyUsageReporting',
),
migrations.DeleteModel(
name='DailyUsageSummaryActual',
),
migrations.DeleteModel(
name='DailyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='DailyUsageSummaryReporting',
),
migrations.DeleteModel(
name='InterpretationSummary',
),
migrations.DeleteModel(
name='MonthlyAverageUsageBaseline',
),
migrations.DeleteModel(
name='MonthlyAverageUsageReporting',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryActual',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryReporting',
),
]
|
Add migration for deleted gunk# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-13 00:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('datastore', '0027_auto_20160712_2251'),
]
operations = [
migrations.RemoveField(
model_name='dailyusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummaryreporting',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='interpretationsummary',
name='project_block',
),
migrations.RemoveField(
model_name='monthlyaverageusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyaverageusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummaryreporting',
name='interpretation_summary',
),
migrations.DeleteModel(
name='DailyUsageBaseline',
),
migrations.DeleteModel(
name='DailyUsageReporting',
),
migrations.DeleteModel(
name='DailyUsageSummaryActual',
),
migrations.DeleteModel(
name='DailyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='DailyUsageSummaryReporting',
),
migrations.DeleteModel(
name='InterpretationSummary',
),
migrations.DeleteModel(
name='MonthlyAverageUsageBaseline',
),
migrations.DeleteModel(
name='MonthlyAverageUsageReporting',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryActual',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryReporting',
),
]
|
<commit_before><commit_msg>Add migration for deleted gunk<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-13 00:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('datastore', '0027_auto_20160712_2251'),
]
operations = [
migrations.RemoveField(
model_name='dailyusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='dailyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='dailyusagesummaryreporting',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='interpretationsummary',
name='project_block',
),
migrations.RemoveField(
model_name='monthlyaverageusagebaseline',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyaverageusagereporting',
name='meter_run',
),
migrations.RemoveField(
model_name='monthlyusagesummaryactual',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummarybaseline',
name='interpretation_summary',
),
migrations.RemoveField(
model_name='monthlyusagesummaryreporting',
name='interpretation_summary',
),
migrations.DeleteModel(
name='DailyUsageBaseline',
),
migrations.DeleteModel(
name='DailyUsageReporting',
),
migrations.DeleteModel(
name='DailyUsageSummaryActual',
),
migrations.DeleteModel(
name='DailyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='DailyUsageSummaryReporting',
),
migrations.DeleteModel(
name='InterpretationSummary',
),
migrations.DeleteModel(
name='MonthlyAverageUsageBaseline',
),
migrations.DeleteModel(
name='MonthlyAverageUsageReporting',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryActual',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryBaseline',
),
migrations.DeleteModel(
name='MonthlyUsageSummaryReporting',
),
]
|
|
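A note on operation ordering in the migration above: `makemigrations` emits the `RemoveField` operations for the foreign keys before the `DeleteModel` operations so dependent columns are dropped before the tables they tie together. The reduced sketch below shows that pattern for a single model from the record; it mirrors the record rather than adding anything new.

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("datastore", "0027_auto_20160712_2251")]
    operations = [
        # drop the dependent FK column first, then delete the model itself
        migrations.RemoveField(model_name="dailyusagebaseline", name="meter_run"),
        migrations.DeleteModel(name="DailyUsageBaseline"),
    ]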
59882da97dafa0e103550d24795105e4e73f6ff5
|
statsmodels/base/tests/test_optimize.py
|
statsmodels/base/tests/test_optimize.py
|
from numpy.testing import assert_
from statsmodels.base.model import (_fit_mle_newton, _fit_mle_nm,
_fit_mle_bfgs, _fit_mle_cg,
_fit_mle_ncg, _fit_mle_powell)
fit_funcs = {
'newton': _fit_mle_newton,
'nm': _fit_mle_nm, # Nelder-Mead
'bfgs': _fit_mle_bfgs,
'cg': _fit_mle_cg,
'ncg': _fit_mle_ncg,
'powell': _fit_mle_powell
}
def dummy_func(x):
return x**2
def dummy_score(x):
return 2*x
def dummy_hess(x):
return [[2]]
def test_full_output_false():
# just a smoke test
# newton needs f, score, start, fargs, kwargs
# bfgs needs f, score start, fargs, kwargs
# nm needs ""
# cg ""
# ncg ""
# powell ""
for method in fit_funcs:
func = fit_funcs[method]
if method == "newton":
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
hess=dummy_hess, full_output=False, disp=0)
else:
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
full_output=False, disp=0)
assert_(xopts == None)
if method == "powell":
#NOTE: I think I reported this? Might be version/optimize API
# dependent
assert_(retvals.shape == () and retvals.size == 1)
else:
assert_(len(retvals)==1)
|
Test fit funcs for full_output = False
|
TST: Test fit funcs for full_output = False
|
Python
|
bsd-3-clause
|
bsipocz/statsmodels,bavardage/statsmodels,jseabold/statsmodels,DonBeo/statsmodels,waynenilsen/statsmodels,detrout/debian-statsmodels,josef-pkt/statsmodels,kiyoto/statsmodels,cbmoore/statsmodels,Averroes/statsmodels,statsmodels/statsmodels,ChadFulton/statsmodels,adammenges/statsmodels,bashtage/statsmodels,YihaoLu/statsmodels,wdurhamh/statsmodels,wzbozon/statsmodels,yarikoptic/pystatsmodels,edhuckle/statsmodels,statsmodels/statsmodels,nvoron23/statsmodels,bavardage/statsmodels,bashtage/statsmodels,kiyoto/statsmodels,jstoxrocky/statsmodels,alekz112/statsmodels,kiyoto/statsmodels,yarikoptic/pystatsmodels,rgommers/statsmodels,detrout/debian-statsmodels,adammenges/statsmodels,yl565/statsmodels,ChadFulton/statsmodels,jseabold/statsmodels,jstoxrocky/statsmodels,detrout/debian-statsmodels,hainm/statsmodels,phobson/statsmodels,ChadFulton/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,rgommers/statsmodels,wkfwkf/statsmodels,bert9bert/statsmodels,jstoxrocky/statsmodels,statsmodels/statsmodels,YihaoLu/statsmodels,waynenilsen/statsmodels,astocko/statsmodels,DonBeo/statsmodels,bsipocz/statsmodels,kiyoto/statsmodels,phobson/statsmodels,rgommers/statsmodels,gef756/statsmodels,adammenges/statsmodels,gef756/statsmodels,wwf5067/statsmodels,phobson/statsmodels,josef-pkt/statsmodels,wkfwkf/statsmodels,edhuckle/statsmodels,ChadFulton/statsmodels,nguyentu1602/statsmodels,astocko/statsmodels,nguyentu1602/statsmodels,bzero/statsmodels,cbmoore/statsmodels,bsipocz/statsmodels,josef-pkt/statsmodels,edhuckle/statsmodels,bashtage/statsmodels,cbmoore/statsmodels,saketkc/statsmodels,yl565/statsmodels,josef-pkt/statsmodels,hlin117/statsmodels,edhuckle/statsmodels,waynenilsen/statsmodels,wdurhamh/statsmodels,wdurhamh/statsmodels,wwf5067/statsmodels,ChadFulton/statsmodels,cbmoore/statsmodels,jseabold/statsmodels,wkfwkf/statsmodels,jstoxrocky/statsmodels,bavardage/statsmodels,jseabold/statsmodels,nvoron23/statsmodels,Averroes/statsmodels,wdurhamh/statsmodels,alekz112/statsmodels,huongttlan/statsmodels,bert9bert/statsmodels,DonBeo/statsmodels,josef-pkt/statsmodels,nguyentu1602/statsmodels,wzbozon/statsmodels,musically-ut/statsmodels,YihaoLu/statsmodels,bashtage/statsmodels,jseabold/statsmodels,rgommers/statsmodels,wzbozon/statsmodels,bert9bert/statsmodels,saketkc/statsmodels,bert9bert/statsmodels,hlin117/statsmodels,bzero/statsmodels,musically-ut/statsmodels,yarikoptic/pystatsmodels,kiyoto/statsmodels,phobson/statsmodels,edhuckle/statsmodels,huongttlan/statsmodels,wzbozon/statsmodels,wwf5067/statsmodels,rgommers/statsmodels,waynenilsen/statsmodels,huongttlan/statsmodels,saketkc/statsmodels,wkfwkf/statsmodels,gef756/statsmodels,astocko/statsmodels,yl565/statsmodels,saketkc/statsmodels,adammenges/statsmodels,yl565/statsmodels,gef756/statsmodels,bzero/statsmodels,nvoron23/statsmodels,bert9bert/statsmodels,hainm/statsmodels,musically-ut/statsmodels,bzero/statsmodels,DonBeo/statsmodels,statsmodels/statsmodels,wkfwkf/statsmodels,statsmodels/statsmodels,hlin117/statsmodels,Averroes/statsmodels,bavardage/statsmodels,phobson/statsmodels,gef756/statsmodels,wdurhamh/statsmodels,wzbozon/statsmodels,Averroes/statsmodels,yl565/statsmodels,nvoron23/statsmodels,detrout/debian-statsmodels,YihaoLu/statsmodels,bashtage/statsmodels,hlin117/statsmodels,nvoron23/statsmodels,hainm/statsmodels,huongttlan/statsmodels,hainm/statsmodels,YihaoLu/statsmodels,cbmoore/statsmodels,ChadFulton/statsmodels,alekz112/statsmodels,josef-pkt/statsmodels,DonBeo/statsmodels,nguyentu1602/statsmodels,saketkc/statsmodels,astocko/statsmodels,bzero/statsmodels,bsi
pocz/statsmodels,wwf5067/statsmodels,musically-ut/statsmodels,bavardage/statsmodels,alekz112/statsmodels
|
TST: Test fit funcs for full_output = False
|
from numpy.testing import assert_
from statsmodels.base.model import (_fit_mle_newton, _fit_mle_nm,
_fit_mle_bfgs, _fit_mle_cg,
_fit_mle_ncg, _fit_mle_powell)
fit_funcs = {
'newton': _fit_mle_newton,
'nm': _fit_mle_nm, # Nelder-Mead
'bfgs': _fit_mle_bfgs,
'cg': _fit_mle_cg,
'ncg': _fit_mle_ncg,
'powell': _fit_mle_powell
}
def dummy_func(x):
return x**2
def dummy_score(x):
return 2*x
def dummy_hess(x):
return [[2]]
def test_full_output_false():
# just a smoke test
# newton needs f, score, start, fargs, kwargs
# bfgs needs f, score start, fargs, kwargs
# nm needs ""
# cg ""
# ncg ""
# powell ""
for method in fit_funcs:
func = fit_funcs[method]
if method == "newton":
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
hess=dummy_hess, full_output=False, disp=0)
else:
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
full_output=False, disp=0)
assert_(xopts == None)
if method == "powell":
#NOTE: I think I reported this? Might be version/optimize API
# dependent
assert_(retvals.shape == () and retvals.size == 1)
else:
assert_(len(retvals)==1)
|
<commit_before><commit_msg>TST: Test fit funcs for full_output = False<commit_after>
|
from numpy.testing import assert_
from statsmodels.base.model import (_fit_mle_newton, _fit_mle_nm,
_fit_mle_bfgs, _fit_mle_cg,
_fit_mle_ncg, _fit_mle_powell)
fit_funcs = {
'newton': _fit_mle_newton,
'nm': _fit_mle_nm, # Nelder-Mead
'bfgs': _fit_mle_bfgs,
'cg': _fit_mle_cg,
'ncg': _fit_mle_ncg,
'powell': _fit_mle_powell
}
def dummy_func(x):
return x**2
def dummy_score(x):
return 2*x
def dummy_hess(x):
return [[2]]
def test_full_output_false():
# just a smoke test
# newton needs f, score, start, fargs, kwargs
# bfgs needs f, score start, fargs, kwargs
# nm needs ""
# cg ""
# ncg ""
# powell ""
for method in fit_funcs:
func = fit_funcs[method]
if method == "newton":
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
hess=dummy_hess, full_output=False, disp=0)
else:
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
full_output=False, disp=0)
assert_(xopts == None)
if method == "powell":
#NOTE: I think I reported this? Might be version/optimize API
# dependent
assert_(retvals.shape == () and retvals.size == 1)
else:
assert_(len(retvals)==1)
|
TST: Test fit funcs for full_output = Falsefrom numpy.testing import assert_
from statsmodels.base.model import (_fit_mle_newton, _fit_mle_nm,
_fit_mle_bfgs, _fit_mle_cg,
_fit_mle_ncg, _fit_mle_powell)
fit_funcs = {
'newton': _fit_mle_newton,
'nm': _fit_mle_nm, # Nelder-Mead
'bfgs': _fit_mle_bfgs,
'cg': _fit_mle_cg,
'ncg': _fit_mle_ncg,
'powell': _fit_mle_powell
}
def dummy_func(x):
return x**2
def dummy_score(x):
return 2*x
def dummy_hess(x):
return [[2]]
def test_full_output_false():
# just a smoke test
# newton needs f, score, start, fargs, kwargs
# bfgs needs f, score start, fargs, kwargs
# nm needs ""
# cg ""
# ncg ""
# powell ""
for method in fit_funcs:
func = fit_funcs[method]
if method == "newton":
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
hess=dummy_hess, full_output=False, disp=0)
else:
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
full_output=False, disp=0)
assert_(xopts == None)
if method == "powell":
#NOTE: I think I reported this? Might be version/optimize API
# dependent
assert_(retvals.shape == () and retvals.size == 1)
else:
assert_(len(retvals)==1)
|
<commit_before><commit_msg>TST: Test fit funcs for full_output = False<commit_after>from numpy.testing import assert_
from statsmodels.base.model import (_fit_mle_newton, _fit_mle_nm,
_fit_mle_bfgs, _fit_mle_cg,
_fit_mle_ncg, _fit_mle_powell)
fit_funcs = {
'newton': _fit_mle_newton,
'nm': _fit_mle_nm, # Nelder-Mead
'bfgs': _fit_mle_bfgs,
'cg': _fit_mle_cg,
'ncg': _fit_mle_ncg,
'powell': _fit_mle_powell
}
def dummy_func(x):
return x**2
def dummy_score(x):
return 2*x
def dummy_hess(x):
return [[2]]
def test_full_output_false():
# just a smoke test
# newton needs f, score, start, fargs, kwargs
# bfgs needs f, score start, fargs, kwargs
# nm needs ""
# cg ""
# ncg ""
# powell ""
for method in fit_funcs:
func = fit_funcs[method]
if method == "newton":
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
hess=dummy_hess, full_output=False, disp=0)
else:
xopts, retvals = func(dummy_func, dummy_score, [1], (), {},
full_output=False, disp=0)
assert_(xopts == None)
if method == "powell":
#NOTE: I think I reported this? Might be version/optimize API
# dependent
assert_(retvals.shape == () and retvals.size == 1)
else:
assert_(len(retvals)==1)
|
|
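What the test above pins down, shown against scipy directly: with `full_output=False` the fmin-family optimizers return only the solution, and `fmin_powell` may hand back a 0-d array rather than a length-1 one, which is the version-dependent quirk flagged in the test's NOTE. A minimal demonstration, assuming a reasonably recent scipy:

import numpy as np
from scipy import optimize

f = lambda x: float(np.sum(x ** 2))

xopt = optimize.fmin(f, [1.0], full_output=False, disp=0)
print(xopt)                    # ndarray close to [0.]

xopt_powell = optimize.fmin_powell(f, [1.0], full_output=False, disp=0)
print(np.shape(xopt_powell))   # may be () -- the array-scalar quirk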
d2018ec07a79b1d0fd7c0ee3c34faa0c4c5bf723
|
src/python/borg/tools/plot_performance.py
|
src/python/borg/tools/plot_performance.py
|
"""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from borg.tools.plot_performance import main
raise SystemExit(main())
from cargo.log import get_logger
log = get_logger(__name__, default_level = "INFO")
def plot_trial(session, trial_row):
"""
Plot the specified trial.
"""
# get the relevant attempts
from sqlalchemy import and_
from borg.data import RunAttemptRow
attempt_rows = \
session \
.query(RunAttemptRow) \
.filter(
and_(
RunAttemptRow.trials.contains(trial_row),
RunAttemptRow.answer != None,
),
) \
.order_by(RunAttemptRow.cost)
# break them into series
attempts = {}
budget = None
for attempt_row in attempt_rows:
solver_name = attempt_row.solver_name
solver_attempts = attempts.get(solver_name, [])
solver_attempts.append(attempt_row.cost)
attempts[solver_name] = solver_attempts
# determine the budget
if budget is None:
budget = attempt_row.budget
else:
if budget != attempt_row.budget:
raise RuntimeError("multiple budgets in trial")
session.commit()
# plot the series
import pylab
pylab.title("Solver Performance (Trial %s)" % trial_row.uuid)
for (name, costs) in attempts.iteritems():
x_values = [0.0] + [c.as_s for c in costs] + [budget.as_s]
y_values = range(len(costs) + 1) + [len(costs)]
pylab.plot(x_values, y_values, label = name)
pylab.legend()
pylab.show()
def main():
"""
Run the script.
"""
# get command line arguments
import borg.data
from cargo.flags import parse_given
(trial_uuid,) = parse_given(usage = "%prog <trial_uuid> [options]")
# set up logging
from cargo.log import enable_default_logging
enable_default_logging()
get_logger("sqlalchemy.engine", level = "DETAIL")
# connect to the database and go
from cargo.sql.alchemy import SQL_Engines
with SQL_Engines.default:
from cargo.sql.alchemy import make_session
from borg.data import research_connect
ResearchSession = make_session(bind = research_connect())
with ResearchSession() as session:
# get the trial
from borg.data import TrialRow
trial_row = session.query(TrialRow).get(trial_uuid)
if trial_row is None:
raise ValueError("no such trial")
# and plot it
plot_trial(session, trial_row)
|
Add a simple performance plotting tool.
|
Add a simple performance plotting tool.
|
Python
|
mit
|
borg-project/borg
|
Add a simple performance plotting tool.
|
"""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from borg.tools.plot_performance import main
raise SystemExit(main())
from cargo.log import get_logger
log = get_logger(__name__, default_level = "INFO")
def plot_trial(session, trial_row):
"""
Plot the specified trial.
"""
# get the relevant attempts
from sqlalchemy import and_
from borg.data import RunAttemptRow
attempt_rows = \
session \
.query(RunAttemptRow) \
.filter(
and_(
RunAttemptRow.trials.contains(trial_row),
RunAttemptRow.answer != None,
),
) \
.order_by(RunAttemptRow.cost)
# break them into series
attempts = {}
budget = None
for attempt_row in attempt_rows:
solver_name = attempt_row.solver_name
solver_attempts = attempts.get(solver_name, [])
solver_attempts.append(attempt_row.cost)
attempts[solver_name] = solver_attempts
# determine the budget
if budget is None:
budget = attempt_row.budget
else:
if budget != attempt_row.budget:
raise RuntimeError("multiple budgets in trial")
session.commit()
# plot the series
import pylab
pylab.title("Solver Performance (Trial %s)" % trial_row.uuid)
for (name, costs) in attempts.iteritems():
x_values = [0.0] + [c.as_s for c in costs] + [budget.as_s]
y_values = range(len(costs) + 1) + [len(costs)]
pylab.plot(x_values, y_values, label = name)
pylab.legend()
pylab.show()
def main():
"""
Run the script.
"""
# get command line arguments
import borg.data
from cargo.flags import parse_given
(trial_uuid,) = parse_given(usage = "%prog <trial_uuid> [options]")
# set up logging
from cargo.log import enable_default_logging
enable_default_logging()
get_logger("sqlalchemy.engine", level = "DETAIL")
# connect to the database and go
from cargo.sql.alchemy import SQL_Engines
with SQL_Engines.default:
from cargo.sql.alchemy import make_session
from borg.data import research_connect
ResearchSession = make_session(bind = research_connect())
with ResearchSession() as session:
# get the trial
from borg.data import TrialRow
trial_row = session.query(TrialRow).get(trial_uuid)
if trial_row is None:
raise ValueError("no such trial")
# and plot it
plot_trial(session, trial_row)
|
<commit_before><commit_msg>Add a simple performance plotting tool.<commit_after>
|
"""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from borg.tools.plot_performance import main
raise SystemExit(main())
from cargo.log import get_logger
log = get_logger(__name__, default_level = "INFO")
def plot_trial(session, trial_row):
"""
Plot the specified trial.
"""
# get the relevant attempts
from sqlalchemy import and_
from borg.data import RunAttemptRow
attempt_rows = \
session \
.query(RunAttemptRow) \
.filter(
and_(
RunAttemptRow.trials.contains(trial_row),
RunAttemptRow.answer != None,
),
) \
.order_by(RunAttemptRow.cost)
# break them into series
attempts = {}
budget = None
for attempt_row in attempt_rows:
solver_name = attempt_row.solver_name
solver_attempts = attempts.get(solver_name, [])
solver_attempts.append(attempt_row.cost)
attempts[solver_name] = solver_attempts
# determine the budget
if budget is None:
budget = attempt_row.budget
else:
if budget != attempt_row.budget:
raise RuntimeError("multiple budgets in trial")
session.commit()
# plot the series
import pylab
pylab.title("Solver Performance (Trial %s)" % trial_row.uuid)
for (name, costs) in attempts.iteritems():
x_values = [0.0] + [c.as_s for c in costs] + [budget.as_s]
y_values = range(len(costs) + 1) + [len(costs)]
pylab.plot(x_values, y_values, label = name)
pylab.legend()
pylab.show()
def main():
"""
Run the script.
"""
# get command line arguments
import borg.data
from cargo.flags import parse_given
(trial_uuid,) = parse_given(usage = "%prog <trial_uuid> [options]")
# set up logging
from cargo.log import enable_default_logging
enable_default_logging()
get_logger("sqlalchemy.engine", level = "DETAIL")
# connect to the database and go
from cargo.sql.alchemy import SQL_Engines
with SQL_Engines.default:
from cargo.sql.alchemy import make_session
from borg.data import research_connect
ResearchSession = make_session(bind = research_connect())
with ResearchSession() as session:
# get the trial
from borg.data import TrialRow
trial_row = session.query(TrialRow).get(trial_uuid)
if trial_row is None:
raise ValueError("no such trial")
# and plot it
plot_trial(session, trial_row)
|
Add a simple performance plotting tool."""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from borg.tools.plot_performance import main
raise SystemExit(main())
from cargo.log import get_logger
log = get_logger(__name__, default_level = "INFO")
def plot_trial(session, trial_row):
"""
Plot the specified trial.
"""
# get the relevant attempts
from sqlalchemy import and_
from borg.data import RunAttemptRow
attempt_rows = \
session \
.query(RunAttemptRow) \
.filter(
and_(
RunAttemptRow.trials.contains(trial_row),
RunAttemptRow.answer != None,
),
) \
.order_by(RunAttemptRow.cost)
# break them into series
attempts = {}
budget = None
for attempt_row in attempt_rows:
solver_name = attempt_row.solver_name
solver_attempts = attempts.get(solver_name, [])
solver_attempts.append(attempt_row.cost)
attempts[solver_name] = solver_attempts
# determine the budget
if budget is None:
budget = attempt_row.budget
else:
if budget != attempt_row.budget:
raise RuntimeError("multiple budgets in trial")
session.commit()
# plot the series
import pylab
pylab.title("Solver Performance (Trial %s)" % trial_row.uuid)
for (name, costs) in attempts.iteritems():
x_values = [0.0] + [c.as_s for c in costs] + [budget.as_s]
y_values = range(len(costs) + 1) + [len(costs)]
pylab.plot(x_values, y_values, label = name)
pylab.legend()
pylab.show()
def main():
"""
Run the script.
"""
# get command line arguments
import borg.data
from cargo.flags import parse_given
(trial_uuid,) = parse_given(usage = "%prog <trial_uuid> [options]")
# set up logging
from cargo.log import enable_default_logging
enable_default_logging()
get_logger("sqlalchemy.engine", level = "DETAIL")
# connect to the database and go
from cargo.sql.alchemy import SQL_Engines
with SQL_Engines.default:
from cargo.sql.alchemy import make_session
from borg.data import research_connect
ResearchSession = make_session(bind = research_connect())
with ResearchSession() as session:
# get the trial
from borg.data import TrialRow
trial_row = session.query(TrialRow).get(trial_uuid)
if trial_row is None:
raise ValueError("no such trial")
# and plot it
plot_trial(session, trial_row)
|
<commit_before><commit_msg>Add a simple performance plotting tool.<commit_after>"""
@author: Bryan Silverthorn <bcs@cargo-cult.org>
"""
if __name__ == "__main__":
from borg.tools.plot_performance import main
raise SystemExit(main())
from cargo.log import get_logger
log = get_logger(__name__, default_level = "INFO")
def plot_trial(session, trial_row):
"""
Plot the specified trial.
"""
# get the relevant attempts
from sqlalchemy import and_
from borg.data import RunAttemptRow
attempt_rows = \
session \
.query(RunAttemptRow) \
.filter(
and_(
RunAttemptRow.trials.contains(trial_row),
RunAttemptRow.answer != None,
),
) \
.order_by(RunAttemptRow.cost)
# break them into series
attempts = {}
budget = None
for attempt_row in attempt_rows:
solver_name = attempt_row.solver_name
solver_attempts = attempts.get(solver_name, [])
solver_attempts.append(attempt_row.cost)
attempts[solver_name] = solver_attempts
# determine the budget
if budget is None:
budget = attempt_row.budget
else:
if budget != attempt_row.budget:
raise RuntimeError("multiple budgets in trial")
session.commit()
# plot the series
import pylab
pylab.title("Solver Performance (Trial %s)" % trial_row.uuid)
for (name, costs) in attempts.iteritems():
x_values = [0.0] + [c.as_s for c in costs] + [budget.as_s]
y_values = range(len(costs) + 1) + [len(costs)]
pylab.plot(x_values, y_values, label = name)
pylab.legend()
pylab.show()
def main():
"""
Run the script.
"""
# get command line arguments
import borg.data
from cargo.flags import parse_given
(trial_uuid,) = parse_given(usage = "%prog <trial_uuid> [options]")
# set up logging
from cargo.log import enable_default_logging
enable_default_logging()
get_logger("sqlalchemy.engine", level = "DETAIL")
# connect to the database and go
from cargo.sql.alchemy import SQL_Engines
with SQL_Engines.default:
from cargo.sql.alchemy import make_session
from borg.data import research_connect
ResearchSession = make_session(bind = research_connect())
with ResearchSession() as session:
# get the trial
from borg.data import TrialRow
trial_row = session.query(TrialRow).get(trial_uuid)
if trial_row is None:
raise ValueError("no such trial")
# and plot it
plot_trial(session, trial_row)
|
|
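The x/y construction inside `plot_trial` above draws a cumulative solved-instances-versus-time curve: one step up per successful attempt, then held flat out to the budget. A standalone sketch with made-up numbers and no ORM involved:

import pylab

costs = [3.0, 7.5, 12.0]   # per-attempt solve times, already sorted ascending
budget = 20.0              # shared budget for the trial (illustrative)
x_values = [0.0] + costs + [budget]
y_values = list(range(len(costs) + 1)) + [len(costs)]  # 0,1,2,3,3
pylab.plot(x_values, y_values, label="some_solver")
pylab.legend()
pylab.show()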
ce9ff3ac42d388d7898ea3e88dce3d2eb4168c30
|
hc/accounts/migrations/0042_remove_member_rw.py
|
hc/accounts/migrations/0042_remove_member_rw.py
|
# Generated by Django 3.2.4 on 2021-07-22 14:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_fill_role'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='rw',
),
]
|
Add a migration to remove Member.rw
|
Add a migration to remove Member.rw
|
Python
|
bsd-3-clause
|
iphoting/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,iphoting/healthchecks
|
Add a migration to remove Member.rw
|
# Generated by Django 3.2.4 on 2021-07-22 14:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_fill_role'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='rw',
),
]
|
<commit_before><commit_msg>Add a migration to remove Member.rw<commit_after>
|
# Generated by Django 3.2.4 on 2021-07-22 14:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_fill_role'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='rw',
),
]
|
Add a migration to remove Member.rw# Generated by Django 3.2.4 on 2021-07-22 14:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_fill_role'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='rw',
),
]
|
<commit_before><commit_msg>Add a migration to remove Member.rw<commit_after># Generated by Django 3.2.4 on 2021-07-22 14:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_fill_role'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='rw',
),
]
|
|
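The `RemoveField` above reads like the tail of a two-step field replacement: the preceding `0041_fill_role` migration would have copied the old boolean into a new role column before the flag is dropped. That backfill is not shown in the record, so the sketch below is only a guess at its shape; the role values and the `0040` dependency name are placeholders, not facts from the record.

from django.db import migrations

def fill_role(apps, schema_editor):
    # Copy the legacy rw flag into the replacement role column (values assumed).
    Member = apps.get_model("accounts", "Member")
    Member.objects.filter(rw=True).update(role="w")
    Member.objects.filter(rw=False).update(role="r")

class Migration(migrations.Migration):
    dependencies = [("accounts", "0040_previous")]  # placeholder name
    operations = [migrations.RunPython(fill_role, migrations.RunPython.noop)]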
814974c6736f1a9aef6fec7abf4153f194a231c7
|
simpleubjson/exceptions.py
|
simpleubjson/exceptions.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
|
Add missed exception module. Ooops.
|
Add missed exception module. Ooops.
|
Python
|
bsd-2-clause
|
samipshah/simpleubjson,brainwater/simpleubjson,498888197/simpleubjson,kxepal/simpleubjson
|
Add missed exception module. Ooops.
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
|
<commit_before><commit_msg>Add missed exception module. Ooops.<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
|
Add missed exception module. Ooops.# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
|
<commit_before><commit_msg>Add missed exception module. Ooops.<commit_after># -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
class DecodeError(ValueError):
"""UBJSON data decoding error."""
class MarkerError(DecodeError):
"""Raises if unknown or invalid marker was found in decoded data stream."""
class EarlyEndOfStreamError(DecodeError):
"""Raises when data stream unexpectedly ends."""
class EncodeError(TypeError):
"""Python object encoding error."""
|
|
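The point of the hierarchy above is that both concrete decode failures funnel into `DecodeError`, which is itself a `ValueError`, so callers pick their granularity. A minimal runnable sketch of the intended consumption; the `decode` stand-in and its marker set are illustrative, not the library's API:

class DecodeError(ValueError): pass
class MarkerError(DecodeError): pass
class EarlyEndOfStreamError(DecodeError): pass

def decode(marker):
    # Stand-in dispatch: reject any marker outside a known set.
    if marker not in b"SUiIlLdD":
        raise MarkerError("unknown marker: %r" % marker)
    return marker

try:
    decode(b"?")
except DecodeError as exc:   # also catches MarkerError, EarlyEndOfStreamError
    print("bad UBJSON input:", exc)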
bb0d7bf6dc36e8d013befc7f13da3cb6c3830c46
|
core/admin/migrations/versions/049fed905da7_.py
|
core/admin/migrations/versions/049fed905da7_.py
|
""" Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE"))
def downgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255))
|
Enforce the nocase collation on the email table
|
Enforce the nocase collation on the email table
|
Python
|
mit
|
kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io
|
Enforce the nocase collation on the email table
|
""" Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE"))
def downgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255))
|
<commit_before><commit_msg>Enforce the nocase collation on the email table<commit_after>
|
""" Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE"))
def downgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255))
|
Enforce the nocase collation on the email table""" Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE"))
def downgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255))
|
<commit_before><commit_msg>Enforce the nocase collation on the email table<commit_after>""" Enforce the nocase collation on the email table
Revision ID: 049fed905da7
Revises: 49d77a93118e
Create Date: 2018-04-21 13:23:56.571524
"""
# revision identifiers, used by Alembic.
revision = '049fed905da7'
down_revision = '49d77a93118e'
from alembic import op
import sqlalchemy as sa
def upgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE"))
def downgrade():
with op.batch_alter_table('user') as batch:
batch.alter_column('email', type_=sa.String(length=255))
|
|
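What the NOCASE collation above changes, demonstrated with SQLite directly: equality comparisons on the column start ignoring ASCII case, so two spellings of an address hit the same row. In-memory sketch:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE user (email VARCHAR(255) COLLATE NOCASE)")
con.execute("INSERT INTO user VALUES ('Admin@Example.COM')")
row = con.execute(
    "SELECT email FROM user WHERE email = ?", ("admin@example.com",)
).fetchone()
print(row)  # ('Admin@Example.COM',) despite the case difference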
cff8e237afb567ba980b8790d9c7416b8be21fdd
|
solutions/uri/1010/1010.py
|
solutions/uri/1010/1010.py
|
import sys
s = 0.0
for line in sys.stdin:
a, b, c = line.split()
a, b, c = int(a), int(b), float(c)
s += c * b
print(f'VALOR A PAGAR: R$ {s:.2f}')
|
Solve Simple Calculate in python
|
Solve Simple Calculate in python
|
Python
|
mit
|
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
|
Solve Simple Calculate in python
|
import sys
s = 0.0
for line in sys.stdin:
a, b, c = line.split()
a, b, c = int(a), int(b), float(c)
s += c * b
print(f'VALOR A PAGAR: R$ {s:.2f}')
|
<commit_before><commit_msg>Solve Simple Calculate in python<commit_after>
|
import sys
s = 0.0
for line in sys.stdin:
a, b, c = line.split()
a, b, c = int(a), int(b), float(c)
s += c * b
print(f'VALOR A PAGAR: R$ {s:.2f}')
|
Solve Simple Calculate in pythonimport sys
s = 0.0
for line in sys.stdin:
a, b, c = line.split()
a, b, c = int(a), int(b), float(c)
s += c * b
print(f'VALOR A PAGAR: R$ {s:.2f}')
|
<commit_before><commit_msg>Solve Simple Calculate in python<commit_after>import sys
s = 0.0
for line in sys.stdin:
a, b, c = line.split()
a, b, c = int(a), int(b), float(c)
s += c * b
print(f'VALOR A PAGAR: R$ {s:.2f}')
|
|
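A quick check of the solution above against the problem's input shape (lines of "code quantity unit_price", where the product code is read but unused). The input values here are illustrative:

import io, sys

sys.stdin = io.StringIO("12 1 5.30\n16 2 5.10\n")
s = 0.0
for line in sys.stdin:
    a, b, c = line.split()
    s += float(c) * int(b)
print(f'VALOR A PAGAR: R$ {s:.2f}')  # VALOR A PAGAR: R$ 15.50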
12c3d91f3a0ae571300f7729d09766aba12c81ac
|
CurveAnalysis/fourier_analysis.py
|
CurveAnalysis/fourier_analysis.py
|
#!/usr/bin/env python
import csv
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
from scipy import fftpack
import matplotlib.pyplot as plt
note_name = '77726_MS-DAR-00205-00001-000-00096'
base_dir = '/home/ibanez/data/amnh/darwin_notes/'
images_dir = base_dir + 'images/'
curves_dir = base_dir + 'image_csvs/'
image_name = images_dir + note_name + '.jpg'
north_curve_name = curves_dir + note_name + '_north.csv'
south_curve_name = curves_dir + note_name + '_south.csv'
input_north_curve = list(csv.reader(open(north_curve_name)))
input_south_curve = list(csv.reader(open(south_curve_name)))
N = 4096
north_curve = signal.resample(input_north_curve, N)
south_curve = signal.resample(input_south_curve, N)
x_signal = []
y_signal = []
for x, y in north_curve:
y_signal.append(y)
x_signal.append(x)
yfft = fftpack.fftshift(fft(y_signal))
plt.semilogy(2.0/N * np.abs(yfft))
plt.grid()
plt.show()
|
Add python script to compute fast fourier transforms.
|
Add python script to compute fast fourier transforms.
|
Python
|
apache-2.0
|
HackTheStacks/darwin-notes-image-processing,HackTheStacks/darwin-notes-image-processing
|
Add python script to compute fast fourier transforms.
|
#!/usr/bin/env python
import csv
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
from scipy import fftpack
import matplotlib.pyplot as plt
note_name = '77726_MS-DAR-00205-00001-000-00096'
base_dir = '/home/ibanez/data/amnh/darwin_notes/'
images_dir = base_dir + 'images/'
curves_dir = base_dir + 'image_csvs/'
image_name = images_dir + note_name + '.jpg'
north_curve_name = curves_dir + note_name + '_north.csv'
south_curve_name = curves_dir + note_name + '_south.csv'
input_north_curve = list(csv.reader(open(north_curve_name)))
input_south_curve = list(csv.reader(open(south_curve_name)))
N = 4096
north_curve = signal.resample(input_north_curve, N)
south_curve = signal.resample(input_south_curve, N)
x_signal = []
y_signal = []
for x, y in north_curve:
y_signal.append(y)
x_signal.append(x)
yfft = fftpack.fftshift(fft(y_signal))
plt.semilogy(2.0/N * np.abs(yfft))
plt.grid()
plt.show()
|
<commit_before><commit_msg>Add python script to compute fast fourier transforms.<commit_after>
|
#!/usr/bin/env python
import csv
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
from scipy import fftpack
import matplotlib.pyplot as plt
note_name = '77726_MS-DAR-00205-00001-000-00096'
base_dir = '/home/ibanez/data/amnh/darwin_notes/'
images_dir = base_dir + 'images/'
curves_dir = base_dir + 'image_csvs/'
image_name = images_dir + note_name + '.jpg'
north_curve_name = curves_dir + note_name + '_north.csv'
south_curve_name = curves_dir + note_name + '_south.csv'
input_north_curve = list(csv.reader(open(north_curve_name)))
input_south_curve = list(csv.reader(open(south_curve_name)))
N = 4096
north_curve = signal.resample(input_north_curve, N)
south_curve = signal.resample(input_south_curve, N)
x_signal = []
y_signal = []
for x, y in north_curve:
y_signal.append(y)
x_signal.append(x)
yfft = fftpack.fftshift(fft(y_signal))
plt.semilogy(2.0/N * np.abs(yfft))
plt.grid()
plt.show()
|
Add python script to compute fast fourier transforms.#!/usr/bin/env python
import csv
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
from scipy import fftpack
import matplotlib.pyplot as plt
note_name = '77726_MS-DAR-00205-00001-000-00096'
base_dir = '/home/ibanez/data/amnh/darwin_notes/'
images_dir = base_dir + 'images/'
curves_dir = base_dir + 'image_csvs/'
image_name = images_dir + note_name + '.jpg'
north_curve_name = curves_dir + note_name + '_north.csv'
south_curve_name = curves_dir + note_name + '_south.csv'
input_north_curve = list(csv.reader(open(north_curve_name)))
input_south_curve = list(csv.reader(open(south_curve_name)))
N = 4096
north_curve = signal.resample(input_north_curve, N)
south_curve = signal.resample(input_south_curve, N)
x_signal = []
y_signal = []
for x, y in north_curve:
y_signal.append(y)
x_signal.append(x)
yfft = fftpack.fftshift(fft(y_signal))
plt.semilogy(2.0/N * np.abs(yfft))
plt.grid()
plt.show()
|
<commit_before><commit_msg>Add python script to compute fast fourier transforms.<commit_after>#!/usr/bin/env python
import csv
import numpy as np
from scipy import signal
from scipy.fftpack import fft, ifft
from scipy import fftpack
import matplotlib.pyplot as plt
note_name = '77726_MS-DAR-00205-00001-000-00096'
base_dir = '/home/ibanez/data/amnh/darwin_notes/'
images_dir = base_dir + 'images/'
curves_dir = base_dir + 'image_csvs/'
image_name = images_dir + note_name + '.jpg'
north_curve_name = curves_dir + note_name + '_north.csv'
south_curve_name = curves_dir + note_name + '_south.csv'
input_north_curve = list(csv.reader(open(north_curve_name)))
input_south_curve = list(csv.reader(open(south_curve_name)))
N = 4096
north_curve = signal.resample(input_north_curve, N)
south_curve = signal.resample(input_south_curve, N)
x_signal = []
y_signal = []
for x, y in north_curve:
y_signal.append(y)
x_signal.append(x)
yfft = fftpack.fftshift(fft(y_signal))
plt.semilogy(2.0/N * np.abs(yfft))
plt.grid()
plt.show()
|
|
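One caveat with the script above: `csv.reader` yields lists of strings, so handing `input_north_curve` straight to `signal.resample` relies on a numeric conversion that numpy will not perform on string arrays. A hedged fix is to load the curve as floats up front; the file path below is illustrative:

import numpy as np
from scipy import signal

north = np.loadtxt("note_north.csv", delimiter=",")  # (n_points, 2) float array
north_rs = signal.resample(north, 4096)              # resamples along axis 0
x_signal, y_signal = north_rs[:, 0], north_rs[:, 1]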
fd80e8088c840fb836f00f4d41b0fcfd0a132d0a
|
calaccess_processed/migrations/0023_auto_20170206_0037.py
|
calaccess_processed/migrations/0023_auto_20170206_0037.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 00:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0022_ballotmeasurecontestidentifier'),
]
operations = [
migrations.AlterModelOptions(
name='party',
options={'ordering': ('name',), 'verbose_name_plural': 'parties'},
),
]
|
Add migration for Party meta option changes
|
Add migration for Party meta option changes
|
Python
|
mit
|
california-civic-data-coalition/django-calaccess-processed-data,california-civic-data-coalition/django-calaccess-processed-data
|
Add migration for Party meta option changes
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 00:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0022_ballotmeasurecontestidentifier'),
]
operations = [
migrations.AlterModelOptions(
name='party',
options={'ordering': ('name',), 'verbose_name_plural': 'parties'},
),
]
|
<commit_before><commit_msg>Add migration for Party meta option changes<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 00:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0022_ballotmeasurecontestidentifier'),
]
operations = [
migrations.AlterModelOptions(
name='party',
options={'ordering': ('name',), 'verbose_name_plural': 'parties'},
),
]
|
Add migration for Party meta option changes# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 00:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0022_ballotmeasurecontestidentifier'),
]
operations = [
migrations.AlterModelOptions(
name='party',
options={'ordering': ('name',), 'verbose_name_plural': 'parties'},
),
]
|
<commit_before><commit_msg>Add migration for Party meta option changes<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 00:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0022_ballotmeasurecontestidentifier'),
]
operations = [
migrations.AlterModelOptions(
name='party',
options={'ordering': ('name',), 'verbose_name_plural': 'parties'},
),
]
|
|
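`AlterModelOptions` above is a state-only operation: `ordering` and `verbose_name_plural` live in Python, so the migration emits no SQL. The Meta it records corresponds to the sketch below; the `name` field is illustrative, since the real model is not shown in the record.

from django.db import models

class Party(models.Model):
    name = models.CharField(max_length=100)  # illustrative field

    class Meta:
        app_label = "calaccess_processed"
        ordering = ("name",)
        verbose_name_plural = "parties"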
22df7a89020cbbcf80a88bcf3572dea591884861
|
avatar/urls.py
|
avatar/urls.py
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\+\w]+)/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\w\d\.\-_]{3,30})/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
|
Support for username with extra chars.
|
Support for username with extra chars.
|
Python
|
bsd-3-clause
|
tbabej/django-avatar,Brendtron5000/django-avatar,Nuevosmedios/django-avatar,stellalie/django-avatar,barbuza/django-avatar,allenling/django-avatar,allenling/django-avatar,tbabej/django-avatar,z4r/django-avatar,imgmix/django-avatar,hexenxp14/django-avatar,MachineandMagic/django-avatar,brajeshvit/avatarmodule,integricho/django-avatar,Kami/django-avatar,integricho/django-avatar,DrMeers/django-avatar,jessehon/django-avatar,z4r/django-avatar,rizumu/django-avatar,grantmcconnaughey/django-avatar,robertour/django-avatar,arctelix/django-avatar,robertour/django-avatar,ericroberts/django-avatar,jezdez/django-avatar,Nuevosmedios/django-avatar,bazerk/django-avatar,Mapiarz/django-avatar,MachineandMagic/django-avatar,lizrice/django-avatar,allenling/bugfixavatar,fedetorre/django-avatar,rizumu/django-avatar,dannybrowne86/django-avatar,Temesis/django-avatar,ayang/django-avatar,aptwebapps/django-avatar,ad-m/django-avatar,allenling/bugfixavatar,imgmix/django-avatar,miohtama/django-avatar,e4c5/django-avatar,holzenburg/django-avatar,jessehon/django-avatar,caumons/django-avatar,guzru/django-avatar,TomLottermann/django-avatar,brajeshvit/avatarmodule,therocode/django-avatar,Mapiarz/django-avatar,e4c5/django-avatar,holzenburg/django-avatar,ericroberts/django-avatar,barbuza/django-avatar,Brendtron5000/django-avatar,ad-m/django-avatar,stellalie/django-avatar,dannybrowne86/django-avatar,bazerk/django-avatar,heliodor/django-avatar,jezdez/django-avatar,aptwebapps/django-avatar,grantmcconnaughey/django-avatar,therocode/django-avatar,nai-central/django-avatar,nai-central/django-avatar,hexenxp14/django-avatar,ayang/django-avatar
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\+\w]+)/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
Support for username with extra chars.
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\w\d\.\-_]{3,30})/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
|
<commit_before>from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\+\w]+)/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
<commit_msg>Support for username with extra chars.<commit_after>
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\w\d\.\-_]{3,30})/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\+\w]+)/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
Support for username with extra chars.from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\w\d\.\-_]{3,30})/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
|
<commit_before>from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\+\w]+)/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
<commit_msg>Support for username with extra chars.<commit_after>from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('avatar.views',
url('^add/$', 'add', name='avatar_add'),
url('^change/$', 'change', name='avatar_change'),
url('^delete/$', 'delete', name='avatar_delete'),
url('^render_primary/(?P<user>[\w\d\.\-_]{3,30})/(?P<size>[\d]+)/$', 'render_primary', name='avatar_render_primary'),
)
|
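What the regex swap above changes, in isolation: dots, hyphens and underscores become legal username characters, length is bounded to 3-30, and the old `+` is no longer accepted. Runnable comparison:

import re

old = re.compile(r"^[\+\w]+$")
new = re.compile(r"^[\w\d\.\-_]{3,30}$")

for name in ["jo", "john.doe", "mary-jane_99", "a+b"]:
    print(name, bool(old.match(name)), bool(new.match(name)))
# jo           True  False  (shorter than the new 3-char minimum)
# john.doe     False True
# mary-jane_99 False True
# a+b          True  False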
f7008616465267906dd7ca75e0339812549e2988
|
tests/unit_tests/test_deplete_operator.py
|
tests/unit_tests/test_deplete_operator.py
|
"""Basic unit tests for openmc.deplete.Operator instantiation
Modifies and resets environment variable OPENMC_CROSS_SECTIONS
to a custom file with new depletion_chain node
"""
from os import remove
from os import environ
from unittest import mock
from pathlib import Path
import pytest
from openmc.deplete.abc import TransportOperator
from openmc.deplete.chain import Chain
BARE_XS_FILE = "bare_cross_sections.xml"
CHAIN_PATH = Path().cwd() / "tests" / "chain_simple.xml"
@pytest.fixture(scope="module")
def bare_xs():
"""Create a very basic cross_sections file, return simple Chain.
"""
bare_xs_contents = """<?xml version="1.0"?>
<cross_sections>
<depletion_chain path="{}" />
</cross_sections>
""".format(CHAIN_PATH)
with open(BARE_XS_FILE, "w") as out:
out.write(bare_xs_contents)
yield
remove(BARE_XS_FILE)
class BareDepleteOperator(TransportOperator):
"""Very basic class for testing the initialization."""
# declare abstract methods so object can be created
def __call__(self, *args, **kwargs):
pass
def initial_condition(self):
pass
def get_results_info(self):
pass
@mock.patch.dict(environ, {"OPENMC_CROSS_SECTIONS": BARE_XS_FILE})
def test_operator_init(bare_xs):
"""The test will set and unset environment variable OPENMC_CROSS_SECTIONS
to point towards a temporary dummy file. This file will be removed
at the end of the test, and only contains a
depletion_chain node."""
# force operator to read from OPENMC_CROSS_SECTIONS
bare_op = BareDepleteOperator(chain_file=None)
act_chain = bare_op.chain
ref_chain = Chain.from_xml(CHAIN_PATH)
assert len(act_chain) == len(ref_chain)
for name in ref_chain.nuclide_dict:
# compare openmc.deplete.Nuclide objects
ref_nuc = ref_chain[name]
act_nuc = act_chain[name]
for prop in [
'name', 'half_life', 'decay_energy', 'reactions',
'decay_modes', 'yield_data', 'yield_energies',
]:
assert getattr(act_nuc, prop) == getattr(ref_nuc, prop), prop
|
Add test for the instantiation of deplete.Operator
|
Add test for the instantiation of deplete.Operator
Construct a minimal cross sections xml file that only
contains the depletion_chain node required by
the Operator. The path to this file is set to the
OPENMC_CROSS_SECTIONS file, and reverted after the test.
The depletion_chain points towards the chain_simple.xml
files, and a reference Chain is produced in the test.
The test involves creating a bare Operator instance, that
constructs a chain based on the temporary
OPENMC_CROSS_SECTIONS file. This chain is compared to
that produced by reading the chain_simple.xml file stored
in the test directory.
|
Python
|
mit
|
paulromano/openmc,smharper/openmc,paulromano/openmc,liangjg/openmc,mit-crpg/openmc,shikhar413/openmc,liangjg/openmc,amandalund/openmc,smharper/openmc,amandalund/openmc,walshjon/openmc,liangjg/openmc,paulromano/openmc,mit-crpg/openmc,walshjon/openmc,mit-crpg/openmc,amandalund/openmc,shikhar413/openmc,shikhar413/openmc,walshjon/openmc,smharper/openmc,smharper/openmc,walshjon/openmc,liangjg/openmc,amandalund/openmc,paulromano/openmc,mit-crpg/openmc,shikhar413/openmc
|
Add test for the instantiation of deplete.Operator
Construct a minimal cross sections xml file that only
contains the depletion_chain node required by
the Operator. The path to this file is set to the
OPENMC_CROSS_SECTIONS file, and reverted after the test.
The depletion_chain points towards the chain_simple.xml
files, and a reference Chain is produced in the test.
The test involves creating a bare Operator instance, that
constructs a chain based on the temporary
OPENMC_CROSS_SECTIONS file. This chain is compared to
that produced by reading the chain_simple.xml file stored
in the test directory.
|
"""Basic unit tests for openmc.deplete.Operator instantiation
Modifies and resets environment variable OPENMC_CROSS_SECTIONS
to a custom file with new depletion_chain node
"""
from os import remove
from os import environ
from unittest import mock
from pathlib import Path
import pytest
from openmc.deplete.abc import TransportOperator
from openmc.deplete.chain import Chain
BARE_XS_FILE = "bare_cross_sections.xml"
CHAIN_PATH = Path().cwd() / "tests" / "chain_simple.xml"
@pytest.fixture(scope="module")
def bare_xs():
"""Create a very basic cross_sections file, return simple Chain.
"""
bare_xs_contents = """<?xml version="1.0"?>
<cross_sections>
<depletion_chain path="{}" />
</cross_sections>
""".format(CHAIN_PATH)
with open(BARE_XS_FILE, "w") as out:
out.write(bare_xs_contents)
yield
remove(BARE_XS_FILE)
class BareDepleteOperator(TransportOperator):
"""Very basic class for testing the initialization."""
# declare abstract methods so object can be created
def __call__(self, *args, **kwargs):
pass
def initial_condition(self):
pass
def get_results_info(self):
pass
@mock.patch.dict(environ, {"OPENMC_CROSS_SECTIONS": BARE_XS_FILE})
def test_operator_init(bare_xs):
"""The test will set and unset environment variable OPENMC_CROSS_SECTIONS
to point towards a temporary dummy file. This file will be removed
at the end of the test, and only contains a
depletion_chain node."""
# force operator to read from OPENMC_CROSS_SECTIONS
bare_op = BareDepleteOperator(chain_file=None)
act_chain = bare_op.chain
ref_chain = Chain.from_xml(CHAIN_PATH)
assert len(act_chain) == len(ref_chain)
for name in ref_chain.nuclide_dict:
# compare openmc.deplete.Nuclide objects
ref_nuc = ref_chain[name]
act_nuc = act_chain[name]
for prop in [
'name', 'half_life', 'decay_energy', 'reactions',
'decay_modes', 'yield_data', 'yield_energies',
]:
assert getattr(act_nuc, prop) == getattr(ref_nuc, prop), prop
|
<commit_before><commit_msg>Add test for the instantiation of deplete.Operator
Construct a minimal cross sections xml file that only
contains the depletion_chain node required by
the Operator. The path to this file is set to the
OPENMC_CROSS_SECTIONS file, and reverted after the test.
The depletion_chain points towards the chain_simple.xml
files, and a reference Chain is produced in the test.
The test involves creating a bare Operator instance that
constructs a chain based on the temporary
OPENMC_CROSS_SECTIONS file. This chain is compared to
that produced by reading the chain_simple.xml file stored
in the test directory.<commit_after>
|
"""Basic unit tests for openmc.deplete.Operator instantiation
Modifies and resets environment variable OPENMC_CROSS_SECTIONS
to a custom file with new depletion_chain node
"""
from os import remove
from os import environ
from unittest import mock
from pathlib import Path
import pytest
from openmc.deplete.abc import TransportOperator
from openmc.deplete.chain import Chain
BARE_XS_FILE = "bare_cross_sections.xml"
CHAIN_PATH = Path().cwd() / "tests" / "chain_simple.xml"
@pytest.fixture(scope="module")
def bare_xs():
"""Create a very basic cross_sections file, return simple Chain.
"""
bare_xs_contents = """<?xml version="1.0"?>
<cross_sections>
<depletion_chain path="{}" />
</cross_sections>
""".format(CHAIN_PATH)
with open(BARE_XS_FILE, "w") as out:
out.write(bare_xs_contents)
yield
remove(BARE_XS_FILE)
class BareDepleteOperator(TransportOperator):
"""Very basic class for testing the initialization."""
# declare abstract methods so object can be created
def __call__(self, *args, **kwargs):
pass
def initial_condition(self):
pass
def get_results_info(self):
pass
@mock.patch.dict(environ, {"OPENMC_CROSS_SECTIONS": BARE_XS_FILE})
def test_operator_init(bare_xs):
"""The test will set and unset environment variable OPENMC_CROSS_SECTIONS
to point towards a temporary dummy file. This file will be removed
at the end of the test, and only contains a
depletion_chain node."""
# force operator to read from OPENMC_CROSS_SECTIONS
bare_op = BareDepleteOperator(chain_file=None)
act_chain = bare_op.chain
ref_chain = Chain.from_xml(CHAIN_PATH)
assert len(act_chain) == len(ref_chain)
for name in ref_chain.nuclide_dict:
# compare openmc.deplete.Nuclide objects
ref_nuc = ref_chain[name]
act_nuc = act_chain[name]
for prop in [
'name', 'half_life', 'decay_energy', 'reactions',
'decay_modes', 'yield_data', 'yield_energies',
]:
assert getattr(act_nuc, prop) == getattr(ref_nuc, prop), prop
|
Add test for the instantiation of deplete.Operator
Construct a minimal cross sections xml file that only
contains the depletion_chain node required by
the Operator. The path to this file is set to the
OPENMC_CROSS_SECTIONS file, and reverted after the test.
The depletion_chain points towards the chain_simple.xml
files, and a reference Chain is produced in the test.
The test involves creating a bare Operator instance that
constructs a chain based on the temporary
OPENMC_CROSS_SECTIONS file. This chain is compared to
that produced by reading the chain_simple.xml file stored
in the test directory.
"""Basic unit tests for openmc.deplete.Operator instantiation
Modifies and resets environment variable OPENMC_CROSS_SECTIONS
to a custom file with new depletion_chain node
"""
from os import remove
from os import environ
from unittest import mock
from pathlib import Path
import pytest
from openmc.deplete.abc import TransportOperator
from openmc.deplete.chain import Chain
BARE_XS_FILE = "bare_cross_sections.xml"
CHAIN_PATH = Path().cwd() / "tests" / "chain_simple.xml"
@pytest.fixture(scope="module")
def bare_xs():
"""Create a very basic cross_sections file, return simple Chain.
"""
bare_xs_contents = """<?xml version="1.0"?>
<cross_sections>
<depletion_chain path="{}" />
</cross_sections>
""".format(CHAIN_PATH)
with open(BARE_XS_FILE, "w") as out:
out.write(bare_xs_contents)
yield
remove(BARE_XS_FILE)
class BareDepleteOperator(TransportOperator):
"""Very basic class for testing the initialization."""
# declare abstract methods so object can be created
def __call__(self, *args, **kwargs):
pass
def initial_condition(self):
pass
def get_results_info(self):
pass
@mock.patch.dict(environ, {"OPENMC_CROSS_SECTIONS": BARE_XS_FILE})
def test_operator_init(bare_xs):
"""The test will set and unset environment variable OPENMC_CROSS_SECTIONS
to point towards a temporary dummy file. This file will be removed
at the end of the test, and only contains a
depletion_chain node."""
# force operator to read from OPENMC_CROSS_SECTIONS
bare_op = BareDepleteOperator(chain_file=None)
act_chain = bare_op.chain
ref_chain = Chain.from_xml(CHAIN_PATH)
assert len(act_chain) == len(ref_chain)
for name in ref_chain.nuclide_dict:
# compare openmc.deplete.Nuclide objects
ref_nuc = ref_chain[name]
act_nuc = act_chain[name]
for prop in [
'name', 'half_life', 'decay_energy', 'reactions',
'decay_modes', 'yield_data', 'yield_energies',
]:
assert getattr(act_nuc, prop) == getattr(ref_nuc, prop), prop
|
<commit_before><commit_msg>Add test for the instantiation of deplete.Operator
Construct a minimal cross sections xml file that only
contains the depletion_chain node required by
the Operator. The path to this file is set to the
OPENMC_CROSS_SECTIONS file, and reverted after the test.
The depletion_chain points towards the chain_simple.xml
files, and a reference Chain is produced in the test.
The test involves creating a bare Operator instance that
constructs a chain based on the temporary
OPENMC_CROSS_SECTIONS file. This chain is compared to
that produced by reading the chain_simple.xml file stored
in the test directory.<commit_after>"""Basic unit tests for openmc.deplete.Operator instantiation
Modifies and resets environment variable OPENMC_CROSS_SECTIONS
to a custom file with new depletion_chain node
"""
from os import remove
from os import environ
from unittest import mock
from pathlib import Path
import pytest
from openmc.deplete.abc import TransportOperator
from openmc.deplete.chain import Chain
BARE_XS_FILE = "bare_cross_sections.xml"
CHAIN_PATH = Path().cwd() / "tests" / "chain_simple.xml"
@pytest.fixture(scope="module")
def bare_xs():
"""Create a very basic cross_sections file, return simple Chain.
"""
bare_xs_contents = """<?xml version="1.0"?>
<cross_sections>
<depletion_chain path="{}" />
</cross_sections>
""".format(CHAIN_PATH)
with open(BARE_XS_FILE, "w") as out:
out.write(bare_xs_contents)
yield
remove(BARE_XS_FILE)
class BareDepleteOperator(TransportOperator):
"""Very basic class for testing the initialization."""
# declare abstract methods so object can be created
def __call__(self, *args, **kwargs):
pass
def initial_condition(self):
pass
def get_results_info(self):
pass
@mock.patch.dict(environ, {"OPENMC_CROSS_SECTIONS": BARE_XS_FILE})
def test_operator_init(bare_xs):
"""The test will set and unset environment variable OPENMC_CROSS_SECTIONS
to point towards a temporary dummy file. This file will be removed
at the end of the test, and only contains a
depletion_chain node."""
# force operator to read from OPENMC_CROSS_SECTIONS
bare_op = BareDepleteOperator(chain_file=None)
act_chain = bare_op.chain
ref_chain = Chain.from_xml(CHAIN_PATH)
assert len(act_chain) == len(ref_chain)
for name in ref_chain.nuclide_dict:
# compare openmc.deplete.Nuclide objects
ref_nuc = ref_chain[name]
act_nuc = act_chain[name]
for prop in [
'name', 'half_life', 'decay_energy', 'reactions',
'decay_modes', 'yield_data', 'yield_energies',
]:
assert getattr(act_nuc, prop) == getattr(ref_nuc, prop), prop
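A hedged note on exercising this test: pytest picks up the module-scoped bare_xs fixture automatically, so a direct run suffices. The module name below is an assumption, since the record does not say where the file is saved.
import pytest
# hypothetical file name for this test module; adjust to its real location
pytest.main(["-v", "test_operator_init.py"])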
|
|
657e74878179b505c12cdf8e607cd9b3ae549420
|
towel/templatetags/verbose_name_tags.py
|
towel/templatetags/verbose_name_tags.py
|
import itertools
from django import template
register = template.Library()
PATHS = [
'_meta', # model
'queryset.model._meta',
'instance._meta',
'model._meta',
]
def _resolve(instance, last_part):
for path in PATHS:
o = instance
found = True
for part in itertools.chain(path.split('.'), [last_part]):
try:
o = getattr(o, part)
except AttributeError:
found = False
break
if found:
return o
@register.filter
def verbose_name(item):
return _resolve(item, 'verbose_name')
@register.filter
def verbose_name_plural(item):
return _resolve(item, 'verbose_name_plural')
|
Add helpers to determine verbose_name(_plural)? of arbitrary objects
|
Add helpers to determine verbose_name(_plural)? of arbitrary objects
|
Python
|
bsd-3-clause
|
matthiask/towel,matthiask/towel,matthiask/towel,matthiask/towel
|
Add helpers to determine verbose_name(_plural)? of arbitrary objects
|
import itertools
from django import template
register = template.Library()
PATHS = [
'_meta', # model
'queryset.model._meta',
'instance._meta',
'model._meta',
]
def _resolve(instance, last_part):
for path in PATHS:
o = instance
found = True
for part in itertools.chain(path.split('.'), [last_part]):
try:
o = getattr(o, part)
except AttributeError:
found = False
break
if found:
return o
@register.filter
def verbose_name(item):
return _resolve(item, 'verbose_name')
@register.filter
def verbose_name_plural(item):
return _resolve(item, 'verbose_name_plural')
|
<commit_before><commit_msg>Add helpers to determine verbose_name(_plural)? of arbitrary objects<commit_after>
|
import itertools
from django import template
register = template.Library()
PATHS = [
'_meta', # model
'queryset.model._meta',
'instance._meta',
'model._meta',
]
def _resolve(instance, last_part):
for path in PATHS:
o = instance
found = True
for part in itertools.chain(path.split('.'), [last_part]):
try:
o = getattr(o, part)
except AttributeError:
found = False
break
if found:
return o
@register.filter
def verbose_name(item):
return _resolve(item, 'verbose_name')
@register.filter
def verbose_name_plural(item):
return _resolve(item, 'verbose_name_plural')
|
Add helpers to determine verbose_name(_plural)? of arbitrary objects
import itertools
from django import template
register = template.Library()
PATHS = [
'_meta', # model
'queryset.model._meta',
'instance._meta',
'model._meta',
]
def _resolve(instance, last_part):
for path in PATHS:
o = instance
found = True
for part in itertools.chain(path.split('.'), [last_part]):
try:
o = getattr(o, part)
except AttributeError:
found = False
break
if found:
return o
@register.filter
def verbose_name(item):
return _resolve(item, 'verbose_name')
@register.filter
def verbose_name_plural(item):
return _resolve(item, 'verbose_name_plural')
|
<commit_before><commit_msg>Add helpers to determine verbose_name(_plural)? of arbitrary objects<commit_after>import itertools
from django import template
register = template.Library()
PATHS = [
'_meta', # model
'queryset.model._meta',
'instance._meta',
'model._meta',
]
def _resolve(instance, last_part):
for path in PATHS:
o = instance
found = True
for part in itertools.chain(path.split('.'), [last_part]):
try:
o = getattr(o, part)
except AttributeError:
found = False
break
if found:
return o
@register.filter
def verbose_name(item):
return _resolve(item, 'verbose_name')
@register.filter
def verbose_name_plural(item):
return _resolve(item, 'verbose_name_plural')
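A minimal usage sketch of these filters from Python, assuming a hypothetical Django model named Book; per the PATHS list above, the same filters also resolve querysets (via model._meta) and views carrying a queryset or instance attribute.
from towel.templatetags.verbose_name_tags import verbose_name, verbose_name_plural
# Book is a placeholder model whose Meta defines verbose_name = 'book'
print(verbose_name(Book))                        # -> 'book', via _meta
print(verbose_name_plural(Book.objects.all()))   # -> 'books', via model._meta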
|
|
beef7ad662e67a39e9ca387f7643129f88cda7df
|
tools/supervisor/rotate_supervisor_logs.py
|
tools/supervisor/rotate_supervisor_logs.py
|
#!/usr/bin/env python3
#
# Rotates and (optionally) compresses Supervisor logs using `logrotate`
#
# See:
#
# * https://www.rounds.com/blog/easy-logging-with-logrotate-and-supervisord/
# * https://gist.github.com/glarrain/6165987
#
import os
import subprocess
import tempfile
from mediawords.util.config import get_config
from mediawords.util.paths import mc_root_path
from mediawords.util.log import create_logger
# Max. size of a single log file (in bytes)
__LOG_MAX_SIZE = 100 * 1024 * 1024
# Number of old logs to keep
__OLD_LOG_COUNT = 7
l = create_logger(__name__)
# noinspection SpellCheckingInspection
def rotate_supervisor_logs():
root_path = mc_root_path()
l.debug('Media Cloud root path: %s' % root_path)
config = get_config()
child_log_dir = config['supervisor']['childlogdir']
l.debug('Child log directory: %s' % child_log_dir)
supervisor_logs_dir = os.path.join(root_path, child_log_dir)
l.info('Supervisor logs path: %s' % supervisor_logs_dir)
if not os.path.isdir(supervisor_logs_dir):
raise Exception('Supervisor logs directory does not exist at path: %s' % supervisor_logs_dir)
logrotate_config = '''
%(supervisor_logs_dir)s/*.log {
maxsize %(log_max_size)d
rotate %(old_log_count)d
copytruncate
compress
missingok
notifempty
}
''' % {
'supervisor_logs_dir': supervisor_logs_dir,
'log_max_size': __LOG_MAX_SIZE,
'old_log_count': __OLD_LOG_COUNT,
}
logrotate_temp_fd, logrotate_temp_config_path = tempfile.mkstemp(suffix='.conf', prefix='logrotate')
l.debug('Temporary logrotate config path: %s' % logrotate_temp_config_path)
with os.fdopen(logrotate_temp_fd, 'w') as tmp:
tmp.write(logrotate_config)
l.info('Running logrotate...')
subprocess.check_call(['logrotate', '--verbose', logrotate_temp_config_path])
l.debug('Cleaning up temporary logrotate config...')
os.unlink(logrotate_temp_config_path)
if __name__ == '__main__':
rotate_supervisor_logs()
|
Add script to rotate Supervisor log files
|
Add script to rotate Supervisor log files
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud
|
Add script to rotate Supervisor log files
|
#!/usr/bin/env python3
#
# Rotates and (optionally) compresses Supervisor logs using `logrotate`
#
# See:
#
# * https://www.rounds.com/blog/easy-logging-with-logrotate-and-supervisord/
# * https://gist.github.com/glarrain/6165987
#
import os
import subprocess
import tempfile
from mediawords.util.config import get_config
from mediawords.util.paths import mc_root_path
from mediawords.util.log import create_logger
# Max. size of a single log file (in bytes)
__LOG_MAX_SIZE = 100 * 1024 * 1024
# Number of old logs to keep
__OLD_LOG_COUNT = 7
l = create_logger(__name__)
# noinspection SpellCheckingInspection
def rotate_supervisor_logs():
root_path = mc_root_path()
l.debug('Media Cloud root path: %s' % root_path)
config = get_config()
child_log_dir = config['supervisor']['childlogdir']
l.debug('Child log directory: %s' % child_log_dir)
supervisor_logs_dir = os.path.join(root_path, child_log_dir)
l.info('Supervisor logs path: %s' % supervisor_logs_dir)
if not os.path.isdir(supervisor_logs_dir):
raise Exception('Supervisor logs directory does not exist at path: %s' % supervisor_logs_dir)
logrotate_config = '''
%(supervisor_logs_dir)s/*.log {
maxsize %(log_max_size)d
rotate %(old_log_count)d
copytruncate
compress
missingok
notifempty
}
''' % {
'supervisor_logs_dir': supervisor_logs_dir,
'log_max_size': __LOG_MAX_SIZE,
'old_log_count': __OLD_LOG_COUNT,
}
logrotate_temp_fd, logrotate_temp_config_path = tempfile.mkstemp(suffix='.conf', prefix='logrotate')
l.debug('Temporary logrotate config path: %s' % logrotate_temp_config_path)
with os.fdopen(logrotate_temp_fd, 'w') as tmp:
tmp.write(logrotate_config)
l.info('Running logrotate...')
subprocess.check_call(['logrotate', '--verbose', logrotate_temp_config_path])
l.debug('Cleaning up temporary logrotate config...')
os.unlink(logrotate_temp_config_path)
if __name__ == '__main__':
rotate_supervisor_logs()
|
<commit_before><commit_msg>Add script to rotate Supervisor log files<commit_after>
|
#!/usr/bin/env python3
#
# Rotates and (optionally) compresses Supervisor logs using `logrotate`
#
# See:
#
# * https://www.rounds.com/blog/easy-logging-with-logrotate-and-supervisord/
# * https://gist.github.com/glarrain/6165987
#
import os
import subprocess
import tempfile
from mediawords.util.config import get_config
from mediawords.util.paths import mc_root_path
from mediawords.util.log import create_logger
# Max. size of a single log file (in bytes)
__LOG_MAX_SIZE = 100 * 1024 * 1024
# Number of old logs to keep
__OLD_LOG_COUNT = 7
l = create_logger(__name__)
# noinspection SpellCheckingInspection
def rotate_supervisor_logs():
root_path = mc_root_path()
l.debug('Media Cloud root path: %s' % root_path)
config = get_config()
child_log_dir = config['supervisor']['childlogdir']
l.debug('Child log directory: %s' % child_log_dir)
supervisor_logs_dir = os.path.join(root_path, child_log_dir)
l.info('Supervisor logs path: %s' % supervisor_logs_dir)
if not os.path.isdir(supervisor_logs_dir):
raise Exception('Supervisor logs directory does not exist at path: %s' % supervisor_logs_dir)
logrotate_config = '''
%(supervisor_logs_dir)s/*.log {
maxsize %(log_max_size)d
rotate %(old_log_count)d
copytruncate
compress
missingok
notifempty
}
''' % {
'supervisor_logs_dir': supervisor_logs_dir,
'log_max_size': __LOG_MAX_SIZE,
'old_log_count': __OLD_LOG_COUNT,
}
logrotate_temp_fd, logrotate_temp_config_path = tempfile.mkstemp(suffix='.conf', prefix='logrotate')
l.debug('Temporary logrotate config path: %s' % logrotate_temp_config_path)
with os.fdopen(logrotate_temp_fd, 'w') as tmp:
tmp.write(logrotate_config)
l.info('Running logrotate...')
subprocess.check_call(['logrotate', '--verbose', logrotate_temp_config_path])
l.debug('Cleaning up temporary logrotate config...')
os.unlink(logrotate_temp_config_path)
if __name__ == '__main__':
rotate_supervisor_logs()
|
Add script to rotate Supervisor log files
#!/usr/bin/env python3
#
# Rotates and (optionally) compresses Supervisor logs using `logrotate`
#
# See:
#
# * https://www.rounds.com/blog/easy-logging-with-logrotate-and-supervisord/
# * https://gist.github.com/glarrain/6165987
#
import os
import subprocess
import tempfile
from mediawords.util.config import get_config
from mediawords.util.paths import mc_root_path
from mediawords.util.log import create_logger
# Max. size of a single log file (in bytes)
__LOG_MAX_SIZE = 100 * 1024 * 1024
# Number of old logs to keep
__OLD_LOG_COUNT = 7
l = create_logger(__name__)
# noinspection SpellCheckingInspection
def rotate_supervisor_logs():
root_path = mc_root_path()
l.debug('Media Cloud root path: %s' % root_path)
config = get_config()
child_log_dir = config['supervisor']['childlogdir']
l.debug('Child log directory: %s' % child_log_dir)
supervisor_logs_dir = os.path.join(root_path, child_log_dir)
l.info('Supervisor logs path: %s' % supervisor_logs_dir)
if not os.path.isdir(supervisor_logs_dir):
raise Exception('Supervisor logs directory does not exist at path: %s' % supervisor_logs_dir)
logrotate_config = '''
%(supervisor_logs_dir)s/*.log {
maxsize %(log_max_size)d
rotate %(old_log_count)d
copytruncate
compress
missingok
notifempty
}
''' % {
'supervisor_logs_dir': supervisor_logs_dir,
'log_max_size': __LOG_MAX_SIZE,
'old_log_count': __OLD_LOG_COUNT,
}
logrotate_temp_fd, logrotate_temp_config_path = tempfile.mkstemp(suffix='.conf', prefix='logrotate')
l.debug('Temporary logrotate config path: %s' % logrotate_temp_config_path)
with os.fdopen(logrotate_temp_fd, 'w') as tmp:
tmp.write(logrotate_config)
l.info('Running logrotate...')
subprocess.check_call(['logrotate', '--verbose', logrotate_temp_config_path])
l.debug('Cleaning up temporary logrotate config...')
os.unlink(logrotate_temp_config_path)
if __name__ == '__main__':
rotate_supervisor_logs()
|
<commit_before><commit_msg>Add script to rotate Supervisor log files<commit_after>#!/usr/bin/env python3
#
# Rotates and (optionally) compresses Supervisor logs using `logrotate`
#
# See:
#
# * https://www.rounds.com/blog/easy-logging-with-logrotate-and-supervisord/
# * https://gist.github.com/glarrain/6165987
#
import os
import subprocess
import tempfile
from mediawords.util.config import get_config
from mediawords.util.paths import mc_root_path
from mediawords.util.log import create_logger
# Max. size of a single log file (in bytes)
__LOG_MAX_SIZE = 100 * 1024 * 1024
# Number of old logs to keep
__OLD_LOG_COUNT = 7
l = create_logger(__name__)
# noinspection SpellCheckingInspection
def rotate_supervisor_logs():
root_path = mc_root_path()
l.debug('Media Cloud root path: %s' % root_path)
config = get_config()
child_log_dir = config['supervisor']['childlogdir']
l.debug('Child log directory: %s' % child_log_dir)
supervisor_logs_dir = os.path.join(root_path, child_log_dir)
l.info('Supervisor logs path: %s' % supervisor_logs_dir)
if not os.path.isdir(supervisor_logs_dir):
raise Exception('Supervisor logs directory does not exist at path: %s' % supervisor_logs_dir)
logrotate_config = '''
%(supervisor_logs_dir)s/*.log {
maxsize %(log_max_size)d
rotate %(old_log_count)d
copytruncate
compress
missingok
notifempty
}
''' % {
'supervisor_logs_dir': supervisor_logs_dir,
'log_max_size': __LOG_MAX_SIZE,
'old_log_count': __OLD_LOG_COUNT,
}
logrotate_temp_fd, logrotate_temp_config_path = tempfile.mkstemp(suffix='.conf', prefix='logrotate')
l.debug('Temporary logrotate config path: %s' % logrotate_temp_config_path)
with os.fdopen(logrotate_temp_fd, 'w') as tmp:
tmp.write(logrotate_config)
l.info('Running logrotate...')
subprocess.check_call(['logrotate', '--verbose', logrotate_temp_config_path])
l.debug('Cleaning up temporary logrotate config...')
os.unlink(logrotate_temp_config_path)
if __name__ == '__main__':
rotate_supervisor_logs()
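A hedged scheduling sketch: rotate_supervisor_logs() is importable, so a periodic job can call it directly; the package path below is an assumption based on the file's location in the tree.
# e.g. invoked daily by a small cron wrapper; import path is assumed
from tools.supervisor.rotate_supervisor_logs import rotate_supervisor_logs
rotate_supervisor_logs()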
|
|
38499870ade3e96e003373d599bb94f089e57768
|
cptm/tabular2cpt_input.py
|
cptm/tabular2cpt_input.py
|
"""Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv or excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
import re
from pynlpl.clients.frogclient import FrogClient
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
port = 8020
try:
frogclient = FrogClient('localhost', port)
except:
logger.error('Cannot connect to the Frog server. '
'Is it running at port {}?'.format(port))
logger.info('Start the Frog server with "docker run ''-p 127.0.0.1:{}:{} '
'-t -i proycon/lamachine frog -S {}"'.format(port, port, port))
sys.exit(1)
regex = re.compile(r'\(.*\)')
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for data in frogclient.process(text):
word, lemma, morph, ext_pos = data[:4]
if ext_pos: # ext_pos can be None
pos = regex.sub('', ext_pos)
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
|
Add script that converts a field in a tabular data file to cptm input
|
Add script that converts a field in a tabular data file to cptm input
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script that converts a field in a tabular data file to cptm input
|
"""Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv or excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
import re
from pynlpl.clients.frogclient import FrogClient
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
port = 8020
try:
frogclient = FrogClient('localhost', port)
except:
logger.error('Cannot connect to the Frog server. '
'Is it running at port {}?'.format(port))
logger.info('Start the Frog server with "docker run ''-p 127.0.0.1:{}:{} '
'-t -i proycon/lamachine frog -S {}"'.format(port, port, port))
sys.exit(1)
regex = re.compile(r'\(.*\)')
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for data in frogclient.process(text):
word, lemma, morph, ext_pos = data[:4]
if ext_pos: # ext_pos can be None
pos = regex.sub('', ext_pos)
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
|
<commit_before><commit_msg>Add script that converts a field in a tabular data file to cptm input<commit_after>
|
"""Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv or excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
import re
from pynlpl.clients.frogclient import FrogClient
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
port = 8020
try:
frogclient = FrogClient('localhost', port)
except:
logger.error('Cannot connect to the Frog server. '
'Is it running at port {}?'.format(port))
logger.info('Start the Frog server with "docker run ''-p 127.0.0.1:{}:{} '
'-t -i proycon/lamachine frog -S {}"'.format(port, port, port))
sys.exit(1)
regex = re.compile(r'\(.*\)')
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for data in frogclient.process(text):
word, lemma, morph, ext_pos = data[:4]
if ext_pos: # ext_pos can be None
pos = regex.sub('', ext_pos)
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
|
Add script that converts a field in a tabular data file to cptm input
"""Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv or excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
import re
from pynlpl.clients.frogclient import FrogClient
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
port = 8020
try:
frogclient = FrogClient('localhost', port)
except:
logger.error('Cannot connect to the Frog server. '
'Is it running at port {}?'.format(port))
logger.info('Start the Frog server with "docker run ''-p 127.0.0.1:{}:{} '
'-t -i proycon/lamachine frog -S {}"'.format(port, port, port))
sys.exit(1)
regex = re.compile(r'\(.*\)')
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for data in frogclient.process(text):
word, lemma, morph, ext_pos = data[:4]
if ext_pos: # ext_pos can be None
pos = regex.sub('', ext_pos)
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
|
<commit_before><commit_msg>Add script that converts a field in a tabular data file to cptm input<commit_after>"""Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv or excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
import re
from pynlpl.clients.frogclient import FrogClient
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
port = 8020
try:
frogclient = FrogClient('localhost', port)
except:
logger.error('Cannot connect to the Frog server. '
'Is it running at port {}?'.format(port))
logger.info('Start the Frog server with "docker run ''-p 127.0.0.1:{}:{} '
'-t -i proycon/lamachine frog -S {}"'.format(port, port, port))
sys.exit(1)
regex = re.compile(r'\(.*\)')
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for data in frogclient.process(text):
word, lemma, morph, ext_pos = data[:4]
if ext_pos: # ext_pos can be None
pos = regex.sub('', ext_pos)
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
|
|
c32514d224704cdd247a3b1da3519af277065d8e
|
nettests/experimental/dns_injection.py
|
nettests/experimental/dns_injection.py
|
# -*- encoding: utf-8 -*-
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
from ooni import nettest
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [
['resolver', 'r', '8.8.8.1', 'an invalid DNS resolver'],
['timeout', 't', 3, 'timeout after which we should consider the query failed']
]
class DNSInjectionTest(dnst.DNSTest):
"""
This test detects spoofed DNS responses by performing UDP based DNS
queries towards an invalid DNS resolver.
For it to work we must be traversing the network segment of a machine that
is actively injecting DNS query answers.
"""
name = "DNS Injection"
description = "Checks for injection of spoofed DNS answers"
version = "0.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['resolver', 'file']
def setUp(self):
self.resolver = (self.localOptions['resolver'], 53)
self.queryTimeout = [self.localOptions['timeout']]
def inputProcessor(self, filename):
fp = open(filename)
for line in fp:
if line.startswith('http://'):
yield line.replace('http://', '').replace('/', '').strip()
else:
yield line.strip()
fp.close()
def test_injection(self):
self.report['injected'] = None
d = self.performALookup(self.input, self.resolver)
@d.addCallback
def cb(res):
log.msg("The DNS query for %s is injected" % self.input)
self.report['injected'] = True
@d.addErrback
def err(err):
err.trap(defer.TimeoutError)
log.msg("The DNS query for %s is not injected" % self.input)
self.report['injected'] = False
return d
|
Add DNS injection test for detecting censorship when DNS injection happens
|
Add DNS injection test for detecting censorship when DNS injection happens
|
Python
|
bsd-2-clause
|
lordappsec/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe
|
Add DNS injection test for detecting censorship when DNS injection happens
|
# -*- encoding: utf-8 -*-
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
from ooni import nettest
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [
['resolver', 'r', '8.8.8.1', 'an invalid DNS resolver'],
['timeout', 't', 3, 'timeout after which we should consider the query failed']
]
class DNSInjectionTest(dnst.DNSTest):
"""
This test detects spoofed DNS responses by performing UDP based DNS
queries towards an invalid DNS resolver.
For it to work we must be traversing the network segment of a machine that
is actively injecting DNS query answers.
"""
name = "DNS Injection"
description = "Checks for injection of spoofed DNS answers"
version = "0.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['resolver', 'file']
def setUp(self):
self.resolver = (self.localOptions['resolver'], 53)
self.queryTimeout = [self.localOptions['timeout']]
def inputProcessor(self, filename):
fp = open(filename)
for line in fp:
if line.startswith('http://'):
yield line.replace('http://', '').replace('/', '').strip()
else:
yield line.strip()
fp.close()
def test_injection(self):
self.report['injected'] = None
d = self.performALookup(self.input, self.resolver)
@d.addCallback
def cb(res):
log.msg("The DNS query for %s is injected" % self.input)
self.report['injected'] = True
@d.addErrback
def err(err):
err.trap(defer.TimeoutError)
log.msg("The DNS query for %s is not injected" % self.input)
self.report['injected'] = False
return d
|
<commit_before><commit_msg>Add DNS injection test for detecting censorship when DNS injection happens<commit_after>
|
# -*- encoding: utf-8 -*-
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
from ooni import nettest
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [
['resolver', 'r', '8.8.8.1', 'an invalid DNS resolver'],
['timeout', 't', 3, 'timeout after which we should consider the query failed']
]
class DNSInjectionTest(dnst.DNSTest):
"""
This test detects spoofed DNS responses by performing UDP based DNS
queries towards an invalid DNS resolver.
For it to work we must be traversing the network segment of a machine that
is actively injecting DNS query answers.
"""
name = "DNS Injection"
description = "Checks for injection of spoofed DNS answers"
version = "0.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['resolver', 'file']
def setUp(self):
self.resolver = (self.localOptions['resolver'], 53)
self.queryTimeout = [self.localOptions['timeout']]
def inputProcessor(self, filename):
fp = open(filename)
for line in fp:
if line.startswith('http://'):
yield line.replace('http://', '').replace('/', '').strip()
else:
yield line.strip()
fp.close()
def test_injection(self):
self.report['injected'] = None
d = self.performALookup(self.input, self.resolver)
@d.addCallback
def cb(res):
log.msg("The DNS query for %s is injected" % self.input)
self.report['injected'] = True
@d.addErrback
def err(err):
err.trap(defer.TimeoutError)
log.msg("The DNS query for %s is not injected" % self.input)
self.report['injected'] = False
return d
|
Add DNS injection test for detecting censorship when DNS injection happens
# -*- encoding: utf-8 -*-
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
from ooni import nettest
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [
['resolver', 'r', '8.8.8.1', 'an invalid DNS resolver'],
['timeout', 't', 3, 'timeout after which we should consider the query failed']
]
class DNSInjectionTest(dnst.DNSTest):
"""
This test detects spoofed DNS responses by performing UDP based DNS
queries towards an invalid DNS resolver.
For it to work we must be traversing the network segment of a machine that
is actively injecting DNS query answers.
"""
name = "DNS Injection"
description = "Checks for injection of spoofed DNS answers"
version = "0.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['resolver', 'file']
def setUp(self):
self.resolver = (self.localOptions['resolver'], 53)
self.queryTimeout = [self.localOptions['timeout']]
def inputProcessor(self, filename):
fp = open(filename)
for line in fp:
if line.startswith('http://'):
yield line.replace('http://', '').replace('/', '').strip()
else:
yield line.strip()
fp.close()
def test_injection(self):
self.report['injected'] = None
d = self.performALookup(self.input, self.resolver)
@d.addCallback
def cb(res):
log.msg("The DNS query for %s is injected" % self.input)
self.report['injected'] = True
@d.addErrback
def err(err):
err.trap(defer.TimeoutError)
log.msg("The DNS query for %s is not injected" % self.input)
self.report['injected'] = False
return d
|
<commit_before><commit_msg>Add DNS injection test for detecting censorship when DNS injection happens<commit_after># -*- encoding: utf-8 -*-
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
from ooni import nettest
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [
['resolver', 'r', '8.8.8.1', 'an invalid DNS resolver'],
['timeout', 't', 3, 'timeout after which we should consider the query failed']
]
class DNSInjectionTest(dnst.DNSTest):
"""
This test detects spoofed DNS responses by performing UDP based DNS
queries towards an invalid DNS resolver.
For it to work we must be traversing the network segment of a machine that
is actively injecting DNS query answers.
"""
name = "DNS Injection"
description = "Checks for injection of spoofed DNS answers"
version = "0.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['resolver', 'file']
def setUp(self):
self.resolver = (self.localOptions['resolver'], 53)
self.queryTimeout = [self.localOptions['timeout']]
def inputProcessor(self, filename):
fp = open(filename)
for line in fp:
if line.startswith('http://'):
yield line.replace('http://', '').replace('/', '').strip()
else:
yield x.strip()
fp.close()
def test_injection(self):
self.report['injected'] = None
d = self.performALookup(self.input, self.resolver)
@d.addCallback
def cb(res):
log.msg("The DNS query for %s is injected" % self.input)
self.report['injected'] = True
@d.addErrback
def err(err):
err.trap(defer.TimeoutError)
log.msg("The DNS query for %s is not injected" % self.input)
self.report['injected'] = False
return d
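A minimal sketch of the input list this test expects, derived from inputProcessor above; the file name and hostnames are placeholders. Entries may be bare hostnames or http:// URLs, which get normalized before lookup.
with open("hosts.txt", "w") as fp:  # hypothetical file for the -f option
    fp.write("example.com\n")
    fp.write("http://example.org/\n")  # scheme and slashes are stripped by inputProcessor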
|
|
ebc4f60368a1ca3216621c74733059125b565940
|
src/lexus/kucera_francis.py
|
src/lexus/kucera_francis.py
|
__author__ = 's7a'
# The Kucera Francis class
class KuceraFrancis:
# Constructor for the Kucera Francis class
def __init__(self, dict_file):
# Construct the Kucera Francis frequency dictionary
self.kucera_francis_frequency = {}
with open(dict_file, 'r') as frequency_file:
for line in frequency_file:
cols = line.split(';')
self.kucera_francis_frequency[cols[0]] = int(cols[1])
# Get the Kucera Francis frequency for a word
def frequency(self, word):
if word in self.kucera_francis_frequency:
return self.kucera_francis_frequency[word]
else:
return 0
# Get the word with maximum Kucera Francis frequency
def maximum(self, words):
result = ''
for word in words:
if self.frequency(word) > self.frequency(result):
result = word
return result
|
Add the Kucera Francis library
|
Add the Kucera Francis library
|
Python
|
mit
|
Somsubhra/Simplify,Somsubhra/Simplify,Somsubhra/Simplify
|
Add the Kucera Francis library
|
__author__ = 's7a'
# The Kucera Francis class
class KuceraFrancis:
# Constructor for the Kucera Francis class
def __init__(self, dict_file):
# Construct the Kucera Francis frequency dictionary
self.kucera_francis_frequency = {}
with open(dict_file, 'r') as frequency_file:
for line in frequency_file:
cols = line.split(';')
self.kucera_francis_frequency[cols[0]] = int(cols[1])
# Get the Kucera Francis frequency for a word
def frequency(self, word):
if word in self.kucera_francis_frequency:
return self.kucera_francis_frequency[word]
else:
return 0
# Get the word with maximum Kucera Francis frequency
def maximum(self, words):
result = ''
for word in words:
if self.frequency(word) > self.frequency(result):
result = word
return result
|
<commit_before><commit_msg>Add the Kucera Francis library<commit_after>
|
__author__ = 's7a'
# The Kucera Francis class
class KuceraFrancis:
# Constructor for the Kucera Francis class
def __init__(self, dict_file):
# Construct the Kucera Francis frequency dictionary
self.kucera_francis_frequency = {}
with open(dict_file, 'r') as frequency_file:
for line in frequency_file:
cols = line.split(';')
self.kucera_francis_frequency[cols[0]] = int(cols[1])
# Get the Kucera Francis frequency for a word
def frequency(self, word):
if word in self.kucera_francis_frequency:
return self.kucera_francis_frequency[word]
else:
return 0
# Get the word with maximum Kucera Francis frequency
def maximum(self, words):
result = ''
for word in words:
if self.frequency(word) > self.frequency(result):
result = word
return result
|
Add the Kucera Francis library
__author__ = 's7a'
# The Kucera Francis class
class KuceraFrancis:
# Constructor for the Kucera Francis class
def __init__(self, dict_file):
# Construct the Kucera Francis frequency dictionary
self.kucera_francis_frequency = {}
with open(dict_file, 'r') as frequency_file:
for line in frequency_file:
cols = line.split(';')
self.kucera_francis_frequency[cols[0]] = int(cols[1])
# Get the Kucera Francis frequency for a word
def frequency(self, word):
if word in self.kucera_francis_frequency:
return self.kucera_francis_frequency[word]
else:
return 0
# Get the word with maximum Kucera Francis frequency
def maximum(self, words):
result = ''
for word in words:
if self.frequency(word) > self.frequency(result):
result = word
return result
|
<commit_before><commit_msg>Add the Kucera Francis library<commit_after>__author__ = 's7a'
# The Kucera Francis class
class KuceraFrancis:
# Constructor for the Kucera Francis class
def __init__(self, dict_file):
# Construct the Kucera Francis frequency dictionary
self.kucera_francis_frequency = {}
with open(dict_file, 'r') as frequency_file:
for line in frequency_file:
cols = line.split(';')
self.kucera_francis_frequency[cols[0]] = int(cols[1])
# Get the Kucera Francis frequency for a word
def frequency(self, word):
if word in self.kucera_francis_frequency:
return self.kucera_francis_frequency[word]
else:
return 0
# Get the word with maximum Kucera Francis frequency
def maximum(self, words):
result = ''
for word in words:
if self.frequency(word) > self.frequency(result):
result = word
return result
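A short usage sketch, assuming a semicolon-separated word;count dictionary file whose name is a placeholder.
kf = KuceraFrancis("kucera_francis.csv")  # hypothetical dictionary file
print(kf.frequency("house"))                           # 0 when a word is absent
print(kf.maximum(["domicile", "residence", "house"]))  # most frequent candidate wins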
|
|
f144f3a36c6011203a6b1d396233e64e7a8dc089
|
examples/speed_test.py
|
examples/speed_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
from lase.drivers import Oscillo
import numpy as np
import matplotlib.pyplot as plt
import time
def speed_test(host, n_pts=200):
time_array = np.zeros(n_pts)
client = KClient(host)
driver = Oscillo(client)
t0 = time.time()
t_prev = t0
for i in range(n_pts):
for j in range(10):
a = driver.get_laser_current()
b = driver.get_adc()
c = driver.get_laser_power()
t = time.time()
time_array[i] = t - t_prev
print host, i, time_array[i]
t_prev = t
plt.plot(time_array)
driver.close()
hosts = ['192.168.1.{0}'.format(i) for i in [15,8,7,2]]
for host in hosts:
speed_test(host)
plt.show()
|
Add speed test in examples
|
Add speed test in examples
|
Python
|
mit
|
Koheron/lase
|
Add speed test in examples
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
from lase.drivers import Oscillo
import numpy as np
import matplotlib.pyplot as plt
import time
def speed_test(host, n_pts=200):
time_array = np.zeros(n_pts)
client = KClient(host)
driver = Oscillo(client)
t0 = time.time()
t_prev = t0
for i in range(n_pts):
for j in range(10):
a = driver.get_laser_current()
b = driver.get_adc()
c = driver.get_laser_power()
t = time.time()
time_array[i] = t - t_prev
print host, i, time_array[i]
t_prev = t
plt.plot(time_array)
driver.close()
hosts = ['192.168.1.{0}'.format(i) for i in [15,8,7,2]]
for host in hosts:
speed_test(host)
plt.show()
|
<commit_before><commit_msg>Add speed test in examples<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
from lase.drivers import Oscillo
import numpy as np
import matplotlib.pyplot as plt
import time
def speed_test(host, n_pts=200):
time_array = np.zeros(n_pts)
client = KClient(host)
driver = Oscillo(client)
t0 = time.time()
t_prev = t0
for i in range(n_pts):
for j in range(10):
a = driver.get_laser_current()
b = driver.get_adc()
c = driver.get_laser_power()
t = time.time()
time_array[i] = t - t_prev
print host, i, time_array[i]
t_prev = t
plt.plot(time_array)
driver.close()
hosts = ['192.168.1.{0}'.format(i) for i in [15,8,7,2]]
for host in hosts:
speed_test(host)
plt.show()
|
Add speed test in examples
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
from lase.drivers import Oscillo
import numpy as np
import matplotlib.pyplot as plt
import time
def speed_test(host, n_pts=200):
time_array = np.zeros(n_pts)
client = KClient(host)
driver = Oscillo(client)
t0 = time.time()
t_prev = t0
for i in range(n_pts):
for j in range(10):
a = driver.get_laser_current()
b = driver.get_adc()
c = driver.get_laser_power()
t = time.time()
time_array[i] = t - t_prev
print host, i, time_array[i]
t_prev = t
plt.plot(time_array)
driver.close()
hosts = ['192.168.1.{0}'.format(i) for i in [15,8,7,2]]
for host in hosts:
speed_test(host)
plt.show()
|
<commit_before><commit_msg>Add speed test in examples<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import initExample
from lase.core import KClient
from lase.drivers import Oscillo
import numpy as np
import matplotlib.pyplot as plt
import time
def speed_test(host, n_pts=200):
time_array = np.zeros(n_pts)
client = KClient(host)
driver = Oscillo(client)
t0 = time.time()
t_prev = t0
for i in range(n_pts):
for j in range(10):
a = driver.get_laser_current()
b = driver.get_adc()
c = driver.get_laser_power()
t = time.time()
time_array[i] = t - t_prev
print host, i, time_array[i]
t_prev = t
plt.plot(time_array)
driver.close()
hosts = ['192.168.1.{0}'.format(i) for i in [15,8,7,2]]
for host in hosts:
speed_test(host)
plt.show()
|
|
8661b02ca1bcfb3fab9c41c9ec6d620d4d62355b
|
src/rgb.py
|
src/rgb.py
|
#!/usr/bin/env python
### Class to control the RGB LED Indicator ###
import Adafruit_BBIO.GPIO as GPIO
import time
class RGBController(object):
def __init__(self, red_pin = "P8_10", green_pin = "P8_12", blue_pin = "P8_14"):
self.red_pin = red_pin
self.green_pin = green_pin
self.blue_pin = blue_pin
GPIO.setup(self.red_pin, GPIO.OUT)
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.setup(self.green_pin, GPIO.OUT)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.setup(self.blue_pin, GPIO.OUT)
GPIO.output(self.blue_pin, GPIO.LOW)
def turnOn(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.HIGH)
elif color == "green":
GPIO.output(self.green_pin, GPIO.HIGH)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.HIGH)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.HIGH)
GPIO.output(self.blue_pin, GPIO.HIGH)
else:
pass
def turnOff(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.LOW)
elif color == "green":
GPIO.output(self.green_pin, GPIO.LOW)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.LOW)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
else:
pass
def allOff(self):
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
|
Add RGBController class for LED indicator control
|
Add RGBController class for LED indicator control
|
Python
|
mit
|
swbooking/RobotMaria
|
Add RGBController class for LED indicator control
|
#!/usr/bin/env python
### Class to control the RGB LED Indicator ###
import Adafruit_BBIO.GPIO as GPIO
import time
class RGBController(object):
def __init__(self, red_pin = "P8_10", green_pin = "P8_12", blue_pin = "P8_14"):
self.red_pin = red_pin
self.green_pin = green_pin
self.blue_pin = blue_pin
GPIO.setup(self.red_pin, GPIO.OUT)
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.setup(self.green_pin, GPIO.OUT)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.setup(self.blue_pin, GPIO.OUT)
GPIO.output(self.blue_pin, GPIO.LOW)
def turnOn(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.HIGH)
elif color == "green":
GPIO.output(self.green_pin, GPIO.HIGH)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.HIGH)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.HIGH)
GPIO.output(self.blue_pin, GPIO.HIGH)
else:
pass
def turnOff(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.LOW)
elif color == "green":
GPIO.output(self.green_pin, GPIO.LOW)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.LOW)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
else:
pass
def allOff(self):
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
|
<commit_before><commit_msg>Add RGBController class for LED indicator control<commit_after>
|
#!/usr/bin/env python
### Class to control the RGB LED Indicator ###
import Adafruit_BBIO.GPIO as GPIO
import time
class RGBController(object):
def __init__(self, red_pin = "P8_10", green_pin = "P8_12", blue_pin = "P8_14"):
self.red_pin = red_pin
self.green_pin = green_pin
self.blue_pin = blue_pin
GPIO.setup(self.red_pin, GPIO.OUT)
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.setup(self.green_pin, GPIO.OUT)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.setup(self.blue_pin, GPIO.OUT)
GPIO.output(self.blue_pin, GPIO.LOW)
def turnOn(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.HIGH)
elif color == "green":
GPIO.output(self.green_pin, GPIO.HIGH)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.HIGH)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.HIGH)
GPIO.output(self.blue_pin, GPIO.HIGH)
else:
pass
def turnOff(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.LOW)
elif color == "green":
GPIO.output(self.green_pin, GPIO.LOW)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.LOW)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
else:
pass
def allOff(self):
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
|
Add RGBController class for LED indicator control
#!/usr/bin/env python
### Class to control the RGB LED Indicator ###
import Adafruit_BBIO.GPIO as GPIO
import time
class RGBController(object):
def __init__(self, red_pin = "P8_10", green_pin = "P8_12", blue_pin = "P8_14"):
self.red_pin = red_pin
self.green_pin = green_pin
self.blue_pin = blue_pin
GPIO.setup(self.red_pin, GPIO.OUT)
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.setup(self.green_pin, GPIO.OUT)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.setup(self.blue_pin, GPIO.OUT)
GPIO.output(self.blue_pin, GPIO.LOW)
def turnOn(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.HIGH)
elif color == "green":
GPIO.output(self.green_pin, GPIO.HIGH)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.HIGH)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.HIGH)
GPIO.output(self.blue_pin, GPIO.HIGH)
else:
pass
def turnOff(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.LOW)
elif color == "green":
GPIO.output(self.green_pin, GPIO.LOW)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.LOW)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
else:
pass
def allOff(self):
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
|
<commit_before><commit_msg>Add RGBController class for LED indicator control<commit_after>#!/usr/bin/env python
### Class to control the RGB LED Indicator ###
import Adafruit_BBIO.GPIO as GPIO
import time
class RGBController(object):
def __init__(self, red_pin = "P8_10", green_pin = "P8_12", blue_pin = "P8_14"):
self.red_pin = red_pin
self.green_pin = green_pin
self.blue_pin = blue_pin
GPIO.setup(self.red_pin, GPIO.OUT)
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.setup(self.green_pin, GPIO.OUT)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.setup(self.blue_pin, GPIO.OUT)
GPIO.output(self.blue_pin, GPIO.LOW)
def turnOn(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.HIGH)
elif color == "green":
GPIO.output(self.green_pin, GPIO.HIGH)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.HIGH)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.HIGH)
GPIO.output(self.blue_pin, GPIO.HIGH)
else:
pass
def turnOff(self, color):
if color == "red":
GPIO.output(self.red_pin, GPIO.LOW)
elif color == "green":
GPIO.output(self.green_pin, GPIO.LOW)
elif color == "blue":
GPIO.output(self.blue_pin, GPIO.LOW)
elif color == "purple":
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
else:
pass
def allOff(self):
GPIO.output(self.red_pin, GPIO.LOW)
GPIO.output(self.green_pin, GPIO.LOW)
GPIO.output(self.blue_pin, GPIO.LOW)
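A minimal usage sketch with the default BeagleBone pins; the timing is illustrative only.
rgb = RGBController()   # P8_10 / P8_12 / P8_14 by default
rgb.turnOn("purple")    # drives the red and blue channels together
time.sleep(1.0)         # time is imported at the top of the module
rgb.allOff()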
|