| column | type |
|---|---|
| commit | stringlengths 40–40 |
| old_file | stringlengths 4–118 |
| new_file | stringlengths 4–118 |
| old_contents | stringlengths 0–2.94k |
| new_contents | stringlengths 1–4.43k |
| subject | stringlengths 15–444 |
| message | stringlengths 16–3.45k |
| lang | stringclasses, 1 value |
| license | stringclasses, 13 values |
| repos | stringlengths 5–43.2k |
| prompt | stringlengths 17–4.58k |
| response | stringlengths 1–4.43k |
| prompt_tagged | stringlengths 58–4.62k |
| response_tagged | stringlengths 1–4.43k |
| text | stringlengths 132–7.29k |
| text_tagged | stringlengths 173–7.33k |
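Each row below is one mined commit: the file contents before and after the change, the commit subject and message, repo metadata, and derived renderings (`prompt`, `response`, and `*_tagged` variants that wrap the same fields in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, as the first row shows in full). A minimal sketch of loading and inspecting a row with the Hugging Face `datasets` library; the dataset path is a hypothetical placeholder, since the published id is not given here:

```python
# A minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "user/commit-dataset" is a placeholder, not the real dataset id.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")
row = ds[0]

# Core fields: file contents before/after the commit, plus the commit message.
print(row["commit"], row["old_file"])
print(row["old_contents"][:200])
print(row["new_contents"][:200])

# The tagged columns appear to compose the same fields as:
# <commit_before>{old_contents}<commit_msg>{message}<commit_after>{new_contents}
print(row["text_tagged"][:200])
```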
41beca23fff6eab718550d0ce8d22769653c3109
|
sauce_test/test_suite.py
|
sauce_test/test_suite.py
|
# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())
|
# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
import test_dataverse
import test_dataset
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
        unittest.makeSuite(test_dataverse.TestDataverseFunctions),
        unittest.makeSuite(test_dataset.TestDatasetFunctions),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())
|
Update test suite to include dataverse and dataset tests.
|
Update test suite to include dataverse and dataset tests.
|
Python
|
apache-2.0
|
ekoi/DANS-DVN-4.6.1,ekoi/DANS-DVN-4.6.1,quarian/dataverse,leeper/dataverse-1,leeper/dataverse-1,bmckinney/dataverse-canonical,bmckinney/dataverse-canonical,JayanthyChengan/dataverse,quarian/dataverse,quarian/dataverse,leeper/dataverse-1,leeper/dataverse-1,JayanthyChengan/dataverse,JayanthyChengan/dataverse,quarian/dataverse,majorseitan/dataverse,bmckinney/dataverse-canonical,JayanthyChengan/dataverse,ekoi/DANS-DVN-4.6.1,quarian/dataverse,majorseitan/dataverse,JayanthyChengan/dataverse,quarian/dataverse,majorseitan/dataverse,jacksonokuhn/dataverse,majorseitan/dataverse,ekoi/DANS-DVN-4.6.1,JayanthyChengan/dataverse,jacksonokuhn/dataverse,jacksonokuhn/dataverse,quarian/dataverse,ekoi/DANS-DVN-4.6.1,majorseitan/dataverse,jacksonokuhn/dataverse,leeper/dataverse-1,leeper/dataverse-1,bmckinney/dataverse-canonical,ekoi/DANS-DVN-4.6.1,jacksonokuhn/dataverse,jacksonokuhn/dataverse,JayanthyChengan/dataverse,bmckinney/dataverse-canonical,leeper/dataverse-1,majorseitan/dataverse,JayanthyChengan/dataverse,leeper/dataverse-1,majorseitan/dataverse,ekoi/DANS-DVN-4.6.1,bmckinney/dataverse-canonical,jacksonokuhn/dataverse,majorseitan/dataverse,bmckinney/dataverse-canonical,jacksonokuhn/dataverse,bmckinney/dataverse-canonical,ekoi/DANS-DVN-4.6.1,quarian/dataverse
|
# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())Update test suite to include dataverse and dataset tests.
|
# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
import test_dataverse
import test_dataset
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
        unittest.makeSuite(test_dataverse.TestDataverseFunctions),
        unittest.makeSuite(test_dataset.TestDatasetFunctions),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())
|
<commit_before># This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())<commit_msg>Update test suite to include dataverse and dataset tests.<commit_after>
|
# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
import test_dataverse
import test_dataset
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
        unittest.makeSuite(test_dataverse.TestDataverseFunctions),
        unittest.makeSuite(test_dataset.TestDatasetFunctions),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())
|
# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())Update test suite to include dataverse and dataset tests.# This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
import test_dataverse
import test_dataset
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
        unittest.makeSuite(test_dataverse.TestDataverseFunctions),
        unittest.makeSuite(test_dataset.TestDatasetFunctions),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())
|
<commit_before># This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())<commit_msg>Update test suite to include dataverse and dataset tests.<commit_after># This contain a list of individual test and will be run from Jenkins.
import unittest
import access_dvn
import test_dataverse
import test_dataset
# This is a list of testFileName.testClass
def suite():
    return unittest.TestSuite((\
        unittest.makeSuite(access_dvn.AccessDVN),
        unittest.makeSuite(test_dataverse.TestDataverseFunctions),
        unittest.makeSuite(test_dataset.TestDatasetFunctions),
    ))
if __name__ == "__main__":
    result = unittest.TextTestRunner(verbosity=2).run(suite())
    # sys.exit(not result.wasSuccessful())
|
a13f23b5f1b11b40db3325c46add3508f43cf649
|
python/qilinguist/test/test_qilinguist_list.py
|
python/qilinguist/test/test_qilinguist_list.py
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
from qibuild.test.conftest import TestBuildWorkTree
def test_simple(qilinguist_action, record_messages):
    build_worktree = TestBuildWorkTree()
    build_worktree.add_test_project("translateme/qt")
    qilinguist_action("list")
    assert record_messages.find("\*\s+helloqt")
    assert record_messages.find("\*\s+translate")
|
Add a test for `qilinguist list`
|
Add a test for `qilinguist list`
Change-Id: Ia2b2e937732e80a231df9ca57a9d9f39d337da1f
Reviewed-on: http://gerrit.aldebaran.lan/62796
Tested-by: gerrit
Reviewed-by: vbarbaresi <371b46c96c99af52f4f920034e4fcb63ece5bdb5@aldebaran-robotics.com>
|
Python
|
bsd-3-clause
|
aldebaran/qibuild,aldebaran/qibuild,aldebaran/qibuild,aldebaran/qibuild
|
f3e0664c4b062587badaa86ddfc9e75711ecf631
|
coding-practice/Trie/count_using_wildcards.py
|
coding-practice/Trie/count_using_wildcards.py
|
class Trie:
    def __init__(self):
        self.root = {}
        self.end = '*'
    def insert(self, word):
        cur = self.root
        for c in word:
            if c in cur:
                cur = cur[c]
            else:
                cur[c] = {}
                cur = cur[c]
        cur[self.end] = True
    def count_wildcard(self, wc):
        cur = self.root
        to_process = [cur]
        for c in wc:
            if not to_process:
                return 0
            if c == '?':
                process_next = []
                while to_process:
                    cur = to_process.pop()
                    for k in cur:
                        if k != self.end:
                            process_next.append(cur[k])
                to_process = process_next
            else:
                process_next = []
                while to_process:
                    cur = to_process.pop()
                    if c in cur:
                        process_next.append(cur[c])
                to_process = process_next
        count = 0
        for tp in to_process:
            if self.end in tp:
                count += 1
        return count
# for each query find number of words which match that pattern. "?" is wildcard character.
input_strings = ["Cat", "Bat", "Pat", "Man", "Jam", "Jan"]
queries = ["?at", "?a?", "J??"]
tr = Trie()
for ip in input_strings:
    tr.insert(ip)
for q in queries:
    print(tr.count_wildcard(q))
|
Use Trie to find no of words matching pattern
|
Use Trie to find no of words matching pattern
|
Python
|
mit
|
sayak1711/coding_solutions,sayak1711/coding_solutions,sayak1711/coding_solutions,sayak1711/coding_solutions
|
e66c6289a7986048329e86162fa59441df7b9a0a
|
studies/migrations/0003_auto_20170615_1404.py
|
studies/migrations/0003_auto_20170615_1404.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-15 14:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('studies', '0002_auto_20170607_1800'),
    ]
    operations = [
        migrations.RenameField(
            model_name='study',
            old_name='blocks',
            new_name='structure',
        ),
        migrations.AddField(
            model_name='study',
            name='date_modified',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='study',
            name='display_full_screen',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='study',
            name='exit_url',
            field=models.URLField(default='https://lookit.mit.edu/'),
        ),
    ]
|
Add migration for changes to study model.
|
Add migration for changes to study model.
|
Python
|
apache-2.0
|
CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,pattisdr/lookit-api,pattisdr/lookit-api,pattisdr/lookit-api
|
ef72df34a42eb32409a4928346382f6b5e670000
|
core/migrations/0007_pin_private.py
|
core/migrations/0007_pin_private.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-02-11 05:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('core', '0006_remove_pin_origin'),
    ]
    operations = [
        migrations.AddField(
            model_name='pin',
            name='private',
            field=models.BooleanField(default=False),
        ),
    ]
|
Add migrations for private property of pin
|
Feature: Add migrations for private property of pin
|
Python
|
bsd-2-clause
|
lapo-luchini/pinry,lapo-luchini/pinry,pinry/pinry,pinry/pinry,pinry/pinry,lapo-luchini/pinry,pinry/pinry,lapo-luchini/pinry
|
48dbaf596ee8af826d7e9dec96fee0c626005281
|
examples/web_cookies.py
|
examples/web_cookies.py
|
#!/usr/bin/env python3
"""Example for aiohttp.web basic server with cookies.
"""
import asyncio
from aiohttp import web
tmpl = '''\
<html>
<body>
<a href="/login">Login</a><br/>
<a href="/logout">Logout</a><br/>
{}
</body>
</html>'''
@asyncio.coroutine
def root(request):
    resp = web.Response(content_type='text/html')
    resp.text = tmpl.format(request.cookies)
    return resp
@asyncio.coroutine
def login(request):
    resp = web.HTTPFound(location='/')
    resp.set_cookie('AUTH', 'secret')
    return resp
@asyncio.coroutine
def logout(request):
    resp = web.HTTPFound(location='/')
    resp.del_cookie('AUTH')
    return resp
@asyncio.coroutine
def init(loop):
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', root)
    app.router.add_route('GET', '/login', login)
    app.router.add_route('GET', '/logout', logout)
    handler = app.make_handler()
    srv = yield from loop.create_server(handler, '127.0.0.1', 8080)
    print("Server started at http://127.0.0.1:8080")
    return srv, handler
loop = asyncio.get_event_loop()
srv, handler = loop.run_until_complete(init(loop))
try:
    loop.run_forever()
except KeyboardInterrupt:
    loop.run_until_complete(handler.finish_connections())
|
Add example for server-side cookies
|
Add example for server-side cookies
|
Python
|
apache-2.0
|
z2v/aiohttp,avanov/aiohttp,singulared/aiohttp,alunduil/aiohttp,danielnelson/aiohttp,hellysmile/aiohttp,pfreixes/aiohttp,pathcl/aiohttp,morgan-del/aiohttp,alexsdutton/aiohttp,decentfox/aiohttp,panda73111/aiohttp,alex-eri/aiohttp-1,AraHaanOrg/aiohttp,elastic-coders/aiohttp,pfreixes/aiohttp,elastic-coders/aiohttp,mind1master/aiohttp,vaskalas/aiohttp,arthurdarcet/aiohttp,jashandeep-sohi/aiohttp,jashandeep-sohi/aiohttp,noplay/aiohttp,juliatem/aiohttp,moden-py/aiohttp,sterwill/aiohttp,Eyepea/aiohttp,mind1master/aiohttp,AlexLisovoy/aiohttp,andyaguiar/aiohttp,AlexLisovoy/aiohttp,jettify/aiohttp,rutsky/aiohttp,esaezgil/aiohttp,alexsdutton/aiohttp,vedun/aiohttp,decentfox/aiohttp,vaskalas/aiohttp,elastic-coders/aiohttp,rutsky/aiohttp,noplay/aiohttp,alex-eri/aiohttp-1,vaskalas/aiohttp,alex-eri/aiohttp-1,z2v/aiohttp,hellysmile/aiohttp,jettify/aiohttp,noodle-learns-programming/aiohttp,singulared/aiohttp,singulared/aiohttp,Srogozins/aiohttp,KeepSafe/aiohttp,moden-py/aiohttp,z2v/aiohttp,KeepSafe/aiohttp,jojurajan/aiohttp,jashandeep-sohi/aiohttp,arthurdarcet/aiohttp,playpauseandstop/aiohttp,flying-sheep/aiohttp,esaezgil/aiohttp,panda73111/aiohttp,moden-py/aiohttp,jojurajan/aiohttp,AraHaanOrg/aiohttp,esaezgil/aiohttp,iksteen/aiohttp,KeepSafe/aiohttp,arthurdarcet/aiohttp,decentfox/aiohttp,panda73111/aiohttp,juliatem/aiohttp,Insoleet/aiohttp,vasylbo/aiohttp,mind1master/aiohttp,rutsky/aiohttp,jettify/aiohttp,iksteen/aiohttp
|
dca99dab5e09e6767708e92f9bf92e54a9aa9ed4
|
hecuba_py/tests/storage_api_tests.py
|
hecuba_py/tests/storage_api_tests.py
|
import unittest
from storage.api import getByID
from hecuba.hdict import StorageDict
class ApiTestSDict(StorageDict):
    '''
    @TypeSpec <<key:int>, value:double>
    '''
class StorageApi_Tests(unittest.TestCase):
    def setUp(self):
        pass
    def class_type_test(self):
        base_dict = ApiTestSDict('test.api_sdict')
        storage_id = base_dict.getID()
        del base_dict
        rebuild_dict = getByID(storage_id)
        self.assertTrue(isinstance(rebuild_dict, ApiTestSDict))
|
Test build remotely SDict wrong class
|
Test build remotely SDict wrong class
|
Python
|
apache-2.0
|
bsc-dd/hecuba,bsc-dd/hecuba,bsc-dd/hecuba,bsc-dd/hecuba
|
b28d544167dcee75ff7476ae3d43d51191aca717
|
reservation_gap.py
|
reservation_gap.py
|
from collections import Counter
from boto.ec2 import connect_to_region
REGION = 'us-east-1'
COUNTER_KEY_FMT = '{availability_zone},{instance_type}'
ec2 = connect_to_region(REGION)
running_instance_counter = Counter()
current_reservations_counter = Counter()
def get_instances(ec2):
    reservations = ec2.get_all_instances(filters={
        'instance-state-name': 'running',
    })
    instances = []
    for reservation in reservations:
        instances.extend(reservation.instances)
    return instances
instances = get_instances(ec2)
for instance in instances:
    counter_key = COUNTER_KEY_FMT.format(
        availability_zone=instance.placement,
        instance_type=instance.instance_type,
    )
    running_instance_counter[counter_key] += 1
for reservation in ec2.get_all_reserved_instances(filters={'state': 'active'}):
    counter_key = COUNTER_KEY_FMT.format(
        availability_zone=reservation.availability_zone,
        instance_type=reservation.instance_type,
    )
    current_reservations_counter[counter_key] += reservation.instance_count
running_non_reserved_instances = (
    running_instance_counter - current_reservations_counter
)
reservations_not_in_use = (
    current_reservations_counter - running_instance_counter
)
print 'Reservation Gaps'
print 'availability_zone, instance_type, count'
for counter_key, count in sorted(running_non_reserved_instances.items(), key=lambda x: x[1], reverse=True):
    availability_zone, instance_type = counter_key.split(',')
    print ','.join([availability_zone, instance_type, str(count)])
print '-------------------------------------------'
print 'Unused Reservations'
print 'availability_zone, instance_type, count'
for counter_key, count in sorted(reservations_not_in_use.items(), key=lambda x: x[1], reverse=True):
    availability_zone, instance_type = counter_key.split(',')
    print ','.join([availability_zone, instance_type, str(count)])
print '-------------------------------------------'
|
Add script to find gaps between on-demand instances and reservations.
|
Add script to find gaps between on-demand instances and reservations.
|
Python
|
mit
|
kjoconnor/aws
|
22e4430a55c17a5b2f77c90f8862530a3ae7f0d6
|
accounts/migrations/0005_auto_20170621_1843.py
|
accounts/migrations/0005_auto_20170621_1843.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-21 18:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('accounts', '0004_auto_20170607_1800'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.EmailField(max_length=254, unique=True, verbose_name='Email address'),
        ),
    ]
|
Add migration for verbose name.
|
Add migration for verbose name.
|
Python
|
apache-2.0
|
pattisdr/lookit-api,CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,pattisdr/lookit-api,pattisdr/lookit-api
|
5e229787c013cc66fe202964ffb733a2d8f5aa6a
|
webauthn2/scripts/globus_test_client.py
|
webauthn2/scripts/globus_test_client.py
|
from globus_sdk import ConfidentialAppAuthClient
import json
import sys
import pprint
CLIENT_CRED_FILE='/home/secrets/oauth2/client_secret_globus.json'
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: {me} access_token".format(me=sys.argv[0]))
        sys.exit(1)
    token = sys.argv[1]
    f=open(CLIENT_CRED_FILE, 'r')
    config=json.load(f)
    f.close()
    client_id = config['web'].get('client_id')
    client_secret = config['web'].get('client_secret')
    client = ConfidentialAppAuthClient(client_id, client_secret)
    print("Using client id '{client_id}'\n".format(client_id=client_id))
    if client.oauth2_validate_token(token).data.get('active') != True:
        print "not an active token"
        sys.exit(1)
    introspect_response = client.oauth2_token_introspect(token)
    print "token scope is '{scope}'\n".format(scope=introspect_response.get('scope'))
    print "dependent token response is:"
    pprint.pprint(client.oauth2_get_dependent_tokens(token).data)
|
Add utility to examine depedent tokens retrieved from Globus.
|
Add utility to examine depedent tokens retrieved from Globus.
|
Python
|
apache-2.0
|
informatics-isi-edu/webauthn,informatics-isi-edu/webauthn,informatics-isi-edu/webauthn
|
6364ce646e06d1168b8b96f27c8a7df2e2f0f23b
|
neutron/pecan_wsgi/hooks/translation.py
|
neutron/pecan_wsgi/hooks/translation.py
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pecan import hooks
import webob.exc
from neutron.api.v2 import base as v2base
LOG = logging.getLogger(__name__)
class ExceptionTranslationHook(hooks.PecanHook):
    def on_error(self, state, e):
        # if it's already an http error, just return to let it go through
        if isinstance(e, webob.exc.WSGIHTTPException):
            return
        for exc_class, to_class in v2base.FAULT_MAP.items():
            if isinstance(e, exc_class):
                raise to_class(getattr(e, 'msg', e.message))
        # leaked unexpected exception, convert to boring old 500 error and
        # hide message from user in case it contained sensitive details
        LOG.exception(_("An unexpected exception was caught: %s") % e)
        raise webob.exc.HTTPInternalServerError(
            _("An unexpected internal error occured."))
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pecan import hooks
import webob.exc
from neutron.api.v2 import base as v2base
LOG = logging.getLogger(__name__)
class ExceptionTranslationHook(hooks.PecanHook):
    def on_error(self, state, e):
        # if it's already an http error, just return to let it go through
        if isinstance(e, webob.exc.WSGIHTTPException):
            return
        for exc_class, to_class in v2base.FAULT_MAP.items():
            if isinstance(e, exc_class):
                raise to_class(getattr(e, 'msg', e.message))
        # leaked unexpected exception, convert to boring old 500 error and
        # hide message from user in case it contained sensitive details
        LOG.exception(_("An unexpected exception was caught: %s") % e)
        raise webob.exc.HTTPInternalServerError(
            _("An unexpected internal error occurred."))
|
Fix the bug of "Spelling error of a word"
|
Fix the bug of "Spelling error of a word"
The word "occured" should be spelled as "occurred".
So it is changed.
Change-Id: Ice5212dc8565edb0c5b5c55f979b27440eeeb9aa
Closes-Bug: #1505043
|
Python
|
apache-2.0
|
chitr/neutron,asgard-lab/neutron,eayunstack/neutron,noironetworks/neutron,chitr/neutron,mahak/neutron,yanheven/neutron,openstack/neutron,cloudbase/neutron,sebrandon1/neutron,bigswitch/neutron,bigswitch/neutron,huntxu/neutron,MaximNevrov/neutron,asgard-lab/neutron,dims/neutron,wolverineav/neutron,wolverineav/neutron,mahak/neutron,klmitch/neutron,noironetworks/neutron,openstack/neutron,cloudbase/neutron,jumpojoy/neutron,klmitch/neutron,dims/neutron,mahak/neutron,igor-toga/local-snat,huntxu/neutron,apporc/neutron,MaximNevrov/neutron,glove747/liberty-neutron,jumpojoy/neutron,yanheven/neutron,glove747/liberty-neutron,sebrandon1/neutron,igor-toga/local-snat,eayunstack/neutron,apporc/neutron,openstack/neutron
|
# hide message from user in case it contained sensitive details
LOG.exception(_("An unexpected exception was caught: %s") % e)
raise webob.exc.HTTPInternalServerError(
_("An unexpected internal error occurred."))
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pecan import hooks
import webob.exc
from neutron.api.v2 import base as v2base
LOG = logging.getLogger(__name__)
class ExceptionTranslationHook(hooks.PecanHook):
def on_error(self, state, e):
# if it's already an http error, just return to let it go through
if isinstance(e, webob.exc.WSGIHTTPException):
return
for exc_class, to_class in v2base.FAULT_MAP.items():
if isinstance(e, exc_class):
raise to_class(getattr(e, 'msg', e.message))
# leaked unexpected exception, convert to boring old 500 error and
# hide message from user in case it contained sensitive details
LOG.exception(_("An unexpected exception was caught: %s") % e)
raise webob.exc.HTTPInternalServerError(
_("An unexpected internal error occured."))
Fix the bug of "Spelling error of a word"
The word "occured" should be spelled as "occurred".
So it is changed.
Change-Id: Ice5212dc8565edb0c5b5c55f979b27440eeeb9aa
Closes-Bug: #1505043# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pecan import hooks
import webob.exc
from neutron.api.v2 import base as v2base
LOG = logging.getLogger(__name__)
class ExceptionTranslationHook(hooks.PecanHook):
def on_error(self, state, e):
# if it's already an http error, just return to let it go through
if isinstance(e, webob.exc.WSGIHTTPException):
return
for exc_class, to_class in v2base.FAULT_MAP.items():
if isinstance(e, exc_class):
raise to_class(getattr(e, 'msg', e.message))
# leaked unexpected exception, convert to boring old 500 error and
# hide message from user in case it contained sensitive details
LOG.exception(_("An unexpected exception was caught: %s") % e)
raise webob.exc.HTTPInternalServerError(
_("An unexpected internal error occurred."))
|
<commit_before># Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pecan import hooks
import webob.exc
from neutron.api.v2 import base as v2base
LOG = logging.getLogger(__name__)
class ExceptionTranslationHook(hooks.PecanHook):
def on_error(self, state, e):
# if it's already an http error, just return to let it go through
if isinstance(e, webob.exc.WSGIHTTPException):
return
for exc_class, to_class in v2base.FAULT_MAP.items():
if isinstance(e, exc_class):
raise to_class(getattr(e, 'msg', e.message))
# leaked unexpected exception, convert to boring old 500 error and
# hide message from user in case it contained sensitive details
LOG.exception(_("An unexpected exception was caught: %s") % e)
raise webob.exc.HTTPInternalServerError(
_("An unexpected internal error occured."))
<commit_msg>Fix the bug of "Spelling error of a word"
The word "occured" should be spelled as "occurred".
So it is changed.
Change-Id: Ice5212dc8565edb0c5b5c55f979b27440eeeb9aa
Closes-Bug: #1505043<commit_after># Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from pecan import hooks
import webob.exc
from neutron.api.v2 import base as v2base
LOG = logging.getLogger(__name__)
class ExceptionTranslationHook(hooks.PecanHook):
def on_error(self, state, e):
# if it's already an http error, just return to let it go through
if isinstance(e, webob.exc.WSGIHTTPException):
return
for exc_class, to_class in v2base.FAULT_MAP.items():
if isinstance(e, exc_class):
raise to_class(getattr(e, 'msg', e.message))
# leaked unexpected exception, convert to boring old 500 error and
# hide message from user in case it contained sensitive details
LOG.exception(_("An unexpected exception was caught: %s") % e)
raise webob.exc.HTTPInternalServerError(
_("An unexpected internal error occurred."))
|
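The on_error hook in this record is easiest to reason about as a dictionary lookup from internal exceptions to HTTP exceptions. A minimal, self-contained sketch of that pattern (the class names and FAULT_MAP contents below are stand-ins, not neutron's real module):

# Stand-in types; neutron's real FAULT_MAP maps its own exception classes to
# webob.exc HTTP exception classes in exactly this shape.
class NotFound(Exception):
    msg = "resource not found"

class HTTPNotFound(Exception):
    def __init__(self, detail):
        super().__init__(detail)
        self.detail = detail

FAULT_MAP = {NotFound: HTTPNotFound}

def translate(e):
    for exc_class, to_class in FAULT_MAP.items():
        if isinstance(e, exc_class):
            raise to_class(getattr(e, 'msg', str(e)))
    # anything unmapped becomes a generic 500-style error with a bland message
    raise RuntimeError("An unexpected internal error occurred.")

try:
    translate(NotFound())
except HTTPNotFound as err:
    print(err.detail)  # -> resource not found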
01b84f1b2a73ba31d6304baa0e08415657c72dca
|
website/tests/models/test_cancer.py
|
website/tests/models/test_cancer.py
|
from database import db
from model_testing import ModelTest
from models import Cancer
from sqlalchemy.exc import IntegrityError
import pytest
class CancerTest(ModelTest):
def test_init(self):
cancer = Cancer(name='Bladder Urothelial Carcinoma', code='BLCA')
assert cancer.name == 'Bladder Urothelial Carcinoma'
assert cancer.code == 'BLCA'
db.session.add(cancer)
the_same_code = Cancer(name='Colon adenocarcinoma', code='BLCA')
the_same_name = Cancer(name='Bladder Urothelial Carcinoma', code='BRCA')
with pytest.raises(IntegrityError):
db.session.add(the_same_code)
db.session.commit()
# return to previous state, cancer needs to be re-added
db.session.rollback()
db.session.add(cancer)
with pytest.raises(IntegrityError):
db.session.add(the_same_name)
db.session.commit()
|
Add tests for cancer model init
|
Add tests for cancer model init
|
Python
|
lgpl-2.1
|
reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB
|
Add tests for cancer model init
|
from database import db
from model_testing import ModelTest
from models import Cancer
from sqlalchemy.exc import IntegrityError
import pytest
class CancerTest(ModelTest):
def test_init(self):
cancer = Cancer(name='Bladder Urothelial Carcinoma', code='BLCA')
assert cancer.name == 'Bladder Urothelial Carcinoma'
assert cancer.code == 'BLCA'
db.session.add(cancer)
the_same_code = Cancer(name='Colon adenocarcinoma', code='BLCA')
the_same_name = Cancer(name='Bladder Urothelial Carcinoma', code='BRCA')
with pytest.raises(IntegrityError):
db.session.add(the_same_code)
db.session.commit()
# return to previous state, cancer needs to be re-added
db.session.rollback()
db.session.add(cancer)
with pytest.raises(IntegrityError):
db.session.add(the_same_name)
db.session.commit()
|
<commit_before><commit_msg>Add tests for cancer model init<commit_after>
|
from database import db
from model_testing import ModelTest
from models import Cancer
from sqlalchemy.exc import IntegrityError
import pytest
class CancerTest(ModelTest):
def test_init(self):
cancer = Cancer(name='Bladder Urothelial Carcinoma', code='BLCA')
assert cancer.name == 'Bladder Urothelial Carcinoma'
assert cancer.code == 'BLCA'
db.session.add(cancer)
the_same_code = Cancer(name='Colon adenocarcinoma', code='BLCA')
the_same_name = Cancer(name='Bladder Urothelial Carcinoma', code='BRCA')
with pytest.raises(IntegrityError):
db.session.add(the_same_code)
db.session.commit()
# return to previous state, cancer needs to be re-added
db.session.rollback()
db.session.add(cancer)
with pytest.raises(IntegrityError):
db.session.add(the_same_name)
db.session.commit()
|
Add tests for cancer model initfrom database import db
from model_testing import ModelTest
from models import Cancer
from sqlalchemy.exc import IntegrityError
import pytest
class CancerTest(ModelTest):
def test_init(self):
cancer = Cancer(name='Bladder Urothelial Carcinoma', code='BLCA')
assert cancer.name == 'Bladder Urothelial Carcinoma'
assert cancer.code == 'BLCA'
db.session.add(cancer)
the_same_code = Cancer(name='Colon adenocarcinoma', code='BLCA')
the_same_name = Cancer(name='Bladder Urothelial Carcinoma', code='BRCA')
with pytest.raises(IntegrityError):
db.session.add(the_same_code)
db.session.commit()
# return to previous state, cancer needs to be re-added
db.session.rollback()
db.session.add(cancer)
with pytest.raises(IntegrityError):
db.session.add(the_same_name)
db.session.commit()
|
<commit_before><commit_msg>Add tests for cancer model init<commit_after>from database import db
from model_testing import ModelTest
from models import Cancer
from sqlalchemy.exc import IntegrityError
import pytest
class CancerTest(ModelTest):
def test_init(self):
cancer = Cancer(name='Bladder Urothelial Carcinoma', code='BLCA')
assert cancer.name == 'Bladder Urothelial Carcinoma'
assert cancer.code == 'BLCA'
db.session.add(cancer)
the_same_code = Cancer(name='Colon adenocarcinoma', code='BLCA')
the_same_name = Cancer(name='Bladder Urothelial Carcinoma', code='BRCA')
with pytest.raises(IntegrityError):
db.session.add(the_same_code)
db.session.commit()
# return to previous state, cancer needs to be re-added
db.session.rollback()
db.session.add(cancer)
with pytest.raises(IntegrityError):
db.session.add(the_same_name)
db.session.commit()
|
|
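The models module exercised by this test is not part of the record. A sketch of a SQLAlchemy Cancer model consistent with the two IntegrityError assertions (unique name, unique code); the table name, column types, and lengths are assumptions:

from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Cancer(Base):
    __tablename__ = 'cancers'  # table name is an assumption
    id = Column(Integer, primary_key=True)
    name = Column(String(128), unique=True, nullable=False)  # 'the_same_name' must fail
    code = Column(String(16), unique=True, nullable=False)   # 'the_same_code' must fail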
c625c9d8918eab3e86ac14abd674b6232c2fa8e9
|
django/sierra/base/migrations/0002_auto_20170602_1115.py
|
django/sierra/base/migrations/0002_auto_20170602_1115.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from utils.load_data import load_data
class Migration(migrations.Migration):
dependencies = [
('base', '0001_squashed_0009_auto_20170602_1057'),
]
operations = [
migrations.RunPython(
load_data('base/migrations/data/metadatafixtures.json', 'sierra')),
migrations.RunPython(
load_data('base/migrations/data/bibfixtures.json', 'sierra')),
]
|
Add data migrations for base app
|
Add data migrations for base app
|
Python
|
bsd-3-clause
|
unt-libraries/catalog-api,unt-libraries/catalog-api,unt-libraries/catalog-api,unt-libraries/catalog-api
|
Add data migrations for base app
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from utils.load_data import load_data
class Migration(migrations.Migration):
dependencies = [
('base', '0001_squashed_0009_auto_20170602_1057'),
]
operations = [
migrations.RunPython(
load_data('base/migrations/data/metadatafixtures.json', 'sierra')),
migrations.RunPython(
load_data('base/migrations/data/bibfixtures.json', 'sierra')),
]
|
<commit_before><commit_msg>Add data migrations for base app<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from utils.load_data import load_data
class Migration(migrations.Migration):
dependencies = [
('base', '0001_squashed_0009_auto_20170602_1057'),
]
operations = [
migrations.RunPython(
load_data('base/migrations/data/metadatafixtures.json', 'sierra')),
migrations.RunPython(
load_data('base/migrations/data/bibfixtures.json', 'sierra')),
]
|
Add data migrations for base app# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from utils.load_data import load_data
class Migration(migrations.Migration):
dependencies = [
('base', '0001_squashed_0009_auto_20170602_1057'),
]
operations = [
migrations.RunPython(
load_data('base/migrations/data/metadatafixtures.json', 'sierra')),
migrations.RunPython(
load_data('base/migrations/data/bibfixtures.json', 'sierra')),
]
|
<commit_before><commit_msg>Add data migrations for base app<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from utils.load_data import load_data
class Migration(migrations.Migration):
dependencies = [
('base', '0001_squashed_0009_auto_20170602_1057'),
]
operations = [
migrations.RunPython(
load_data('base/migrations/data/metadatafixtures.json', 'sierra')),
migrations.RunPython(
load_data('base/migrations/data/bibfixtures.json', 'sierra')),
]
|
|
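migrations.RunPython expects a callable taking (apps, schema_editor), so the load_data helper used above presumably returns a closure. Its source is not in this record; a plausible sketch, where treating the 'sierra' argument as a database alias is an assumption:

from django.core.management import call_command

def load_data(fixture_path, database):
    # Returns a RunPython-compatible loader for the given fixture,
    # equivalent to: manage.py loaddata <fixture_path> --database=<database>
    def loader(apps, schema_editor):
        call_command('loaddata', fixture_path, database=database)
    return loader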
25417369765087482f0d02b06913b3cffe9f43ad
|
tests/unit/test_secret.py
|
tests/unit/test_secret.py
|
# Import libnacl libs
import libnacl.secret
# Import python libs
import unittest
class TestSecret(unittest.TestCase):
'''
'''
def test_secret(self):
msg = 'But then of course African swallows are not migratory.'
box = libnacl.secret.SecretBox()
ctxt = box.encrypt(msg)
self.assertNotEqual(msg, ctxt)
box2 = libnacl.secret.SecretBox(box.sk)
clear1 = box.decrypt(ctxt)
self.assertEqual(msg, clear1)
clear2 = box2.decrypt(ctxt)
self.assertEqual(clear1, clear2)
ctxt2 = box2.encrypt(msg)
clear3 = box.decrypt(ctxt2)
self.assertEqual(clear3, msg)
|
Add high level secret tests
|
Add high level secret tests
|
Python
|
apache-2.0
|
johnttan/libnacl,cachedout/libnacl,mindw/libnacl,saltstack/libnacl,coinkite/libnacl,RaetProtocol/libnacl
|
Add high level secret tests
|
# Import libnacl libs
import libnacl.secret
# Import python libs
import unittest
class TestSecret(unittest.TestCase):
'''
'''
def test_secret(self):
msg = 'But then of course African swallows are not migratory.'
box = libnacl.secret.SecretBox()
ctxt = box.encrypt(msg)
self.assertNotEqual(msg, ctxt)
box2 = libnacl.secret.SecretBox(box.sk)
clear1 = box.decrypt(ctxt)
self.assertEqual(msg, clear1)
clear2 = box2.decrypt(ctxt)
self.assertEqual(clear1, clear2)
ctxt2 = box2.encrypt(msg)
clear3 = box.decrypt(ctxt2)
self.assertEqual(clear3, msg)
|
<commit_before><commit_msg>Add high level secret tests<commit_after>
|
# Import libnacl libs
import libnacl.secret
# Import python libs
import unittest
class TestSecret(unittest.TestCase):
'''
'''
def test_secret(self):
msg = 'But then of course African swallows are not migratory.'
box = libnacl.secret.SecretBox()
ctxt = box.encrypt(msg)
self.assertNotEqual(msg, ctxt)
box2 = libnacl.secret.SecretBox(box.sk)
clear1 = box.decrypt(ctxt)
self.assertEqual(msg, clear1)
clear2 = box2.decrypt(ctxt)
self.assertEqual(clear1, clear2)
ctxt2 = box2.encrypt(msg)
clear3 = box.decrypt(ctxt2)
self.assertEqual(clear3, msg)
|
Add high level secret tests# Import libnacl libs
import libnacl.secret
# Import python libs
import unittest
class TestSecret(unittest.TestCase):
'''
'''
def test_secret(self):
msg = 'But then of course African swallows are not migratory.'
box = libnacl.secret.SecretBox()
ctxt = box.encrypt(msg)
self.assertNotEqual(msg, ctxt)
box2 = libnacl.secret.SecretBox(box.sk)
clear1 = box.decrypt(ctxt)
self.assertEqual(msg, clear1)
clear2 = box2.decrypt(ctxt)
self.assertEqual(clear1, clear2)
ctxt2 = box2.encrypt(msg)
clear3 = box.decrypt(ctxt2)
self.assertEqual(clear3, msg)
|
<commit_before><commit_msg>Add high level secret tests<commit_after># Import libnacl libs
import libnacl.secret
# Import python libs
import unittest
class TestSecret(unittest.TestCase):
'''
'''
def test_secret(self):
msg = 'But then of course African swallows are not migratory.'
box = libnacl.secret.SecretBox()
ctxt = box.encrypt(msg)
self.assertNotEqual(msg, ctxt)
box2 = libnacl.secret.SecretBox(box.sk)
clear1 = box.decrypt(ctxt)
self.assertEqual(msg, clear1)
clear2 = box2.decrypt(ctxt)
self.assertEqual(clear1, clear2)
ctxt2 = box2.encrypt(msg)
clear3 = box.decrypt(ctxt2)
self.assertEqual(clear3, msg)
|
|
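A quick usage sketch of the API the test exercises. On Python 3 libnacl operates on bytes (the bare str literal in the test dates from Python 2), and the random nonce is packed into the returned ciphertext by default, which is why a second box built from the same key can decrypt it:

import libnacl.secret

box = libnacl.secret.SecretBox()         # generates a fresh symmetric key
ctxt = box.encrypt(b'attack at dawn')    # nonce + ciphertext, packed together
box2 = libnacl.secret.SecretBox(box.sk)  # rebuild a box from the same key
assert box2.decrypt(ctxt) == b'attack at dawn'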
57a58893a2ba94b174b06e7f5f63478dff1e879e
|
providers/popularity/netflix.py
|
providers/popularity/netflix.py
|
from providers.popularity.provider import PopularityProvider
from utils.torrent_util import remove_bad_torrent_matches, torrent_to_movie
IDENTIFIER = "netflix"
class Provider(PopularityProvider):
def get_popular(self):
country = "se"
url = f"https://www.finder.com/{country}/netflix-movies"
data = self.parse_html(url, 'tbody td[data-title="Title"] b, tbody td[data-title="Year of release"]', cache=False)
movies = [
{
"name": movie,
"is_bad": False,
"year": year,
}
for movie, year in zip(data[::2], data[1::2])
]
return movies
|
Add provider for (Swedish) Netflix.
|
Add provider for (Swedish) Netflix.
Change "country" inside the provider for the movies available in a different country.
|
Python
|
mit
|
EmilStenstrom/nephele
|
Add provider for (Swedish) Netflix.
Change "country" inside the provider for the movies available in a different country.
|
from providers.popularity.provider import PopularityProvider
from utils.torrent_util import remove_bad_torrent_matches, torrent_to_movie
IDENTIFIER = "netflix"
class Provider(PopularityProvider):
def get_popular(self):
country = "se"
url = f"https://www.finder.com/{country}/netflix-movies"
data = self.parse_html(url, 'tbody td[data-title="Title"] b, tbody td[data-title="Year of release"]', cache=False)
movies = [
{
"name": movie,
"is_bad": False,
"year": year,
}
for movie, year in zip(data[::2], data[1::2])
]
return movies
|
<commit_before><commit_msg>Add provider for (Swedish) Netflix.
Change "country" inside the provider for the movies available in a different country.<commit_after>
|
from providers.popularity.provider import PopularityProvider
from utils.torrent_util import remove_bad_torrent_matches, torrent_to_movie
IDENTIFIER = "netflix"
class Provider(PopularityProvider):
def get_popular(self):
country = "se"
url = f"https://www.finder.com/{country}/netflix-movies"
data = self.parse_html(url, 'tbody td[data-title="Title"] b, tbody td[data-title="Year of release"]', cache=False)
movies = [
{
"name": movie,
"is_bad": False,
"year": year,
}
for movie, year in zip(data[::2], data[1::2])
]
return movies
|
Add provider for (Swedish) Netflix.
Change "country" inside the provider for the movies available in a different country.from providers.popularity.provider import PopularityProvider
from utils.torrent_util import remove_bad_torrent_matches, torrent_to_movie
IDENTIFIER = "netflix"
class Provider(PopularityProvider):
def get_popular(self):
country = "se"
url = f"https://www.finder.com/{country}/netflix-movies"
data = self.parse_html(url, 'tbody td[data-title="Title"] b, tbody td[data-title="Year of release"]', cache=False)
movies = [
{
"name": movie,
"is_bad": False,
"year": year,
}
for movie, year in zip(data[::2], data[1::2])
]
return movies
|
<commit_before><commit_msg>Add provider for (Swedish) Netflix.
Change "country" inside the provider for the movies available in a different country.<commit_after>from providers.popularity.provider import PopularityProvider
from utils.torrent_util import remove_bad_torrent_matches, torrent_to_movie
IDENTIFIER = "netflix"
class Provider(PopularityProvider):
def get_popular(self):
country = "se"
url = f"https://www.finder.com/{country}/netflix-movies"
data = self.parse_html(url, 'tbody td[data-title="Title"] b, tbody td[data-title="Year of release"]', cache=False)
movies = [
{
"name": movie,
"is_bad": False,
"year": year,
}
for movie, year in zip(data[::2], data[1::2])
]
return movies
|
|
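The list comprehension above relies on the CSS selector returning title and year cells interleaved, which zip(data[::2], data[1::2]) then pairs up. A standalone demonstration of the slicing trick, with hard-coded data in place of scraping:

data = ["Heat", "1995", "Ronin", "1998"]  # title, year, title, year, ...
movies = [{"name": name, "year": year}
          for name, year in zip(data[::2], data[1::2])]
print(movies)  # [{'name': 'Heat', 'year': '1995'}, {'name': 'Ronin', 'year': '1998'}]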
96526df63b2d45f08c7f007b45ee793e3ccc97a3
|
array/rotate-image.py
|
array/rotate-image.py
|
# You are given an n x n 2D matrix that represents an image. Rotate the image by 90 degrees clockwise
# solve with O(1) additional memory
def rotate_image(a):
n = len(a)
if a is None or n < 1:
return a
else:
for i in range(n/2):
for j in range(n-i-1):
temp = a[i][j]
a[i][j] = a[n-1-j][i]
a[n-1-j][i] = a[n-1-i][n-1-j]
a[n-1-i][n-1-j] = a[j][n-1-i]
a[j][n-1-i] = temp
return a
|
Write rotate 90 degrees nxn 2d matrix algorithm: currently debugging
|
Write rotate 90 degrees nxn 2d matrix algorithm: currently debugging
|
Python
|
mit
|
derekmpham/interview-prep,derekmpham/interview-prep
|
Write rotate 90 degrees nxn 2d matrix algorithm: currently debugging
|
# You are given an n x n 2D matrix that represents an image. Rotate the image by 90 degrees clockwise
# solve with O(1) additional memory
def rotate_image(a):
n = len(a)
if a is None or n < 1:
return a
else:
for i in range(n/2):
for j in range(n-i-1):
temp = a[i][j]
a[i][j] = a[n-1-j][i]
a[n-1-j][i] = a[n-1-i][n-1-j]
a[n-1-i][n-1-j] = a[j][n-1-i]
a[j][n-1-i] = temp
return a
|
<commit_before><commit_msg>Write rotate 90 degrees nxn 2d matrix algorithm: currently debugging<commit_after>
|
# You are given an n x n 2D matrix that represents an image. Rotate the image by 90 degrees clockwise
# solve with O(1) additional memory
def rotate_image(a):
n = len(a)
if a is None or n < 1:
return a
else:
for i in range(n/2):
for j in range(n-i-1):
temp = a[i][j]
a[i][j] = a[n-1-j][i]
a[n-1-j][i] = a[n-1-i][n-1-j]
a[n-1-i][n-1-j] = a[j][n-1-i]
a[j][n-1-i] = temp
return a
|
Write rotate 90 degrees nxn 2d matrix algorithm: currently debugging# You are given an n x n 2D matrix that represents an image. Rotate the image by 90 degrees clockwise
# solve with O(1) additional memory
def rotate_image(a):
n = len(a)
if a is None or n < 1:
return a
else:
for i in range(n/2):
for j in range(n-i-1):
temp = a[i][j]
a[i][j] = a[n-1-j][i]
a[n-1-j][i] = a[n-1-i][n-1-j]
a[n-1-i][n-1-j] = a[j][n-1-i]
a[j][n-1-i] = temp
return a
|
<commit_before><commit_msg>Write rotate 90 degrees nxn 2d matrix algorithm: currently debugging<commit_after># You are given an n x n 2D matrix that represents an image. Rotate the image by 90 degrees clockwise
# solve with O(1) additional memory
def rotate_image(a):
n = len(a)
if a is None or n < 1:
return a
else:
for i in range(n/2):
for j in range(n-i-1):
temp = a[i][j]
a[i][j] = a[n-1-j][i]
a[n-1-j][i] = a[n-1-i][n-1-j]
a[n-1-i][n-1-j] = a[j][n-1-i]
a[j][n-1-i] = temp
return a
|
|
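The commit message flags this record as still being debugged, and the inner loop bound does look off: in the standard layer-by-layer rotation the inner index runs from i to n-1-i, not from 0, and Python 3 needs integer division for the layer count. A corrected sketch of the same four-way swap:

def rotate_image(a):
    if not a:  # covers both None and []
        return a
    n = len(a)
    for i in range(n // 2):            # one concentric layer per iteration
        for j in range(i, n - 1 - i):  # only the cells this layer owns
            temp = a[i][j]
            a[i][j] = a[n - 1 - j][i]
            a[n - 1 - j][i] = a[n - 1 - i][n - 1 - j]
            a[n - 1 - i][n - 1 - j] = a[j][n - 1 - i]
            a[j][n - 1 - i] = temp
    return a

assert rotate_image([[1, 2], [3, 4]]) == [[3, 1], [4, 2]]  # 90 degrees clockwise
assert rotate_image([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]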
6e1f1d900d77f1352eb941f7ac569a0c681c8dc1
|
loop_write.py
|
loop_write.py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
import random
import time
from automaton_client import AutomatonClient
SERVER_HOST = "localhost"
SERVER_PORT = 1502
SLEEP_INTERVAL = 5
# sets on only 6 randomly chosen panels
def random6():
regs_values = [0] * 23
indices = []
while len(indices) < 6:
val = random.randint(0, 22)
print val
if val not in indices:
indices.append(val)
regs_values[val] = 100
return regs_values
try:
c = AutomatonClient(host=SERVER_HOST, port=SERVER_PORT)
# open or reconnect TCP to server
if not c.is_open():
if not c.open():
print("unable to connect to %s:%s" % (SERVER_HOST, SERVER_PORT))
if c.is_open():
while True:
result = c.write_all(random6())
if result:
print("WROTE 23 regs from ad #12389")
time.sleep(SLEEP_INTERVAL)
c.close()
except ValueError:
print("Error with host or port params")
except (KeyboardInterrupt, SystemExit):
# interrupting this script sets all panels to off
c.clear_all()
c.close()
|
Add new script to loop on randomly writing registers
|
Add new script to loop on randomly writing registers
|
Python
|
mit
|
vandaele/light-automation,vandaele/light-automation
|
Add new script to loop on randomly writing registers
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
import random
import time
from automaton_client import AutomatonClient
SERVER_HOST = "localhost"
SERVER_PORT = 1502
SLEEP_INTERVAL = 5
# sets on only 6 randomly chosen panels
def random6():
regs_values = [0] * 23
indices = []
while len(indices) < 6:
val = random.randint(0, 22)
print val
if val not in indices:
indices.append(val)
regs_values[val] = 100
return regs_values
try:
c = AutomatonClient(host=SERVER_HOST, port=SERVER_PORT)
# open or reconnect TCP to server
if not c.is_open():
if not c.open():
print("unable to connect to %s:%s" % (SERVER_HOST, SERVER_PORT))
if c.is_open():
while True:
result = c.write_all(random6())
if result:
print("WROTE 23 regs from ad #12389")
time.sleep(SLEEP_INTERVAL)
c.close()
except ValueError:
print("Error with host or port params")
except (KeyboardInterrupt, SystemExit):
# interrupting this script sets all panels to off
c.clear_all()
c.close()
|
<commit_before><commit_msg>Add new script to loop on randomly writing registers<commit_after>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
import random
import time
from automaton_client import AutomatonClient
SERVER_HOST = "localhost"
SERVER_PORT = 1502
SLEEP_INTERVAL = 5
# sets on only 6 randomly chosen panels
def random6():
regs_values = [0] * 23
indices = []
while len(indices) < 6:
val = random.randint(0, 22)
print val
if val not in indices:
indices.append(val)
regs_values[val] = 100
return regs_values
try:
c = AutomatonClient(host=SERVER_HOST, port=SERVER_PORT)
# open or reconnect TCP to server
if not c.is_open():
if not c.open():
print("unable to connect to %s:%s" % (SERVER_HOST, SERVER_PORT))
if c.is_open():
while True:
result = c.write_all(random6())
if result:
print("WROTE 23 regs from ad #12389")
time.sleep(SLEEP_INTERVAL)
c.close()
except ValueError:
print("Error with host or port params")
except (KeyboardInterrupt, SystemExit):
# interrupting this script sets all panels to off
c.clear_all()
c.close()
|
Add new script to loop on randomly writing registers#! /usr/bin/env python
# -*- coding:utf-8 -*-
import random
import time
from automaton_client import AutomatonClient
SERVER_HOST = "localhost"
SERVER_PORT = 1502
SLEEP_INTERVAL = 5
# sets on only 6 randomly chosen panels
def random6():
regs_values = [0] * 23
indices = []
while len(indices) < 6:
val = random.randint(0, 22)
print val
if val not in indices:
indices.append(val)
regs_values[val] = 100
return regs_values
try:
c = AutomatonClient(host=SERVER_HOST, port=SERVER_PORT)
# open or reconnect TCP to server
if not c.is_open():
if not c.open():
print("unable to connect to %s:%s" % (SERVER_HOST, SERVER_PORT))
if c.is_open():
while True:
result = c.write_all(random6())
if result:
print("WROTE 23 regs from ad #12389")
time.sleep(SLEEP_INTERVAL)
c.close()
except ValueError:
print("Error with host or port params")
except (KeyboardInterrupt, SystemExit):
# interrupting this script sets all panels to off
c.clear_all()
c.close()
|
<commit_before><commit_msg>Add new script to loop on randomly writing registers<commit_after>#! /usr/bin/env python
# -*- coding:utf-8 -*-
import random
import time
from automaton_client import AutomatonClient
SERVER_HOST = "localhost"
SERVER_PORT = 1502
SLEEP_INTERVAL = 5
# sets on only 6 randomly chosen panels
def random6():
regs_values = [0] * 23
indices = []
while len(indices) < 6:
val = random.randint(0, 22)
print val
if val not in indices:
indices.append(val)
regs_values[val] = 100
return regs_values
try:
c = AutomatonClient(host=SERVER_HOST, port=SERVER_PORT)
# open or reconnect TCP to server
if not c.is_open():
if not c.open():
print("unable to connect to %s:%s" % (SERVER_HOST, SERVER_PORT))
if c.is_open():
while True:
result = c.write_all(random6())
if result:
print("WROTE 23 regs from ad #12389")
time.sleep(SLEEP_INTERVAL)
c.close()
except ValueError:
print("Error with host or port params")
except (KeyboardInterrupt, SystemExit):
# interrupting this script sets all panels to off
c.clear_all()
c.close()
|
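Two small notes on the script above: the bare `print val` is Python 2 syntax (the rest of the file uses print calls), and the rejection loop in random6() can be expressed directly with random.sample, which draws six distinct indices in one call:

import random

def random6():
    regs_values = [0] * 23
    for index in random.sample(range(23), 6):  # 6 distinct panel indices
        regs_values[index] = 100
    return regs_values

assert sum(random6()) == 600  # always exactly six panels at 100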
|
a3b72336c5f04dfdc91b4d296adf669c9e3bf355
|
txircd/modules/umode_s.py
|
txircd/modules/umode_s.py
|
from txircd.modbase import Mode
class ServerNoticeMode(Mode):
pass
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"modes": {
"uns": ServerNoticeMode()
}
}
def cleanup(self):
self.ircd.removeMode("uns")
|
Implement usermode +s (currently doesn't do anything)
|
Implement usermode +s (currently doesn't do anything)
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd,DesertBus/txircd
|
Implement usermode +s (currently doesn't do anything)
|
from txircd.modbase import Mode
class ServerNoticeMode(Mode):
pass
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"modes": {
"uns": ServerNoticeMode()
}
}
def cleanup(self):
self.ircd.removeMode("uns")
|
<commit_before><commit_msg>Implement usermode +s (currently doesn't do anything)<commit_after>
|
from txircd.modbase import Mode
class ServerNoticeMode(Mode):
pass
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"modes": {
"uns": ServerNoticeMode()
}
}
def cleanup(self):
self.ircd.removeMode("uns")
|
Implement usermode +s (currently doesn't do anything)from txircd.modbase import Mode
class ServerNoticeMode(Mode):
pass
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"modes": {
"uns": ServerNoticeMode()
}
}
def cleanup(self):
self.ircd.removeMode("uns")
|
<commit_before><commit_msg>Implement usermode +s (currently doesn't do anything)<commit_after>from txircd.modbase import Mode
class ServerNoticeMode(Mode):
pass
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
def spawn(self):
return {
"modes": {
"uns": ServerNoticeMode()
}
}
def cleanup(self):
self.ircd.removeMode("uns")
|
|
af9ec9b12f5111cf6b2352ca9efc147c86093024
|
Problems/compressString.py
|
Problems/compressString.py
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
tests = [None, '', 'AABBCC', 'AAABCCDDDD']
results = [None, '', 'AABBCC', 'A3BCCD4']
for i in range(len(tests)):
temp_result = compress_string(tests[i])
if temp_result == results[i]:
print('PASS: {} returned {}'.format(tests[i], temp_result))
else:
print('FAIL: {} returned {}, should have returned {}'.format(tests[i], temp_result, results[i]))
return 0
def compress_string(string):
'''
Compresses a string such that 'AAABCCDDDD' becomes 'A3BCCD4'. Only compresses the string if it saves space ('AABBCC' stays same).
Input: string
Output: string
'''
if string is None:
return None
# Check length
s_length = len(string)
if s_length == 0:
return string
# Create compressed string
compressed = []
count = 1
base_char = string[0]
for i in range(1, s_length):
# Current char is different than last one
if string[i] != base_char:
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
# Change base_char to new character and reset count
base_char = string[i]
count = 1
# Current char is same as last one
else:
count += 1
# Append the last set of chars
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
if len(compressed) >= s_length:
return string
else:
return ''.join(compressed)
if __name__ == '__main__':
main()
|
Add string compression problem and tests.
|
Add string compression problem and tests.
|
Python
|
mit
|
HKuz/Test_Code
|
Add string compression problem and tests.
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
tests = [None, '', 'AABBCC', 'AAABCCDDDD']
results = [None, '', 'AABBCC', 'A3BCCD4']
for i in range(len(tests)):
temp_result = compress_string(tests[i])
if temp_result == results[i]:
print('PASS: {} returned {}'.format(tests[i], temp_result))
else:
print('FAIL: {} returned {}, should have returned {}'.format(tests[i], temp_result, results[i]))
return 0
def compress_string(string):
'''
Compresses a string such that 'AAABCCDDDD' becomes 'A3BCCD4'. Only compresses the string if it saves space ('AABBCC' stays same).
Input: string
Output: string
'''
if string is None:
return None
# Check length
s_length = len(string)
if s_length == 0:
return string
# Create compressed string
compressed = []
count = 1
base_char = string[0]
for i in range(1, s_length):
# Current char is different than last one
if string[i] != base_char:
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
# Change base_char to new character and reset count
base_char = string[i]
count = 1
# Current char is same as last one
else:
count += 1
# Append the last set of chars
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
if len(compressed) >= s_length:
return string
else:
return ''.join(compressed)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add string compression problem and tests.<commit_after>
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
tests = [None, '', 'AABBCC', 'AAABCCDDDD']
results = [None, '', 'AABBCC', 'A3BCCD4']
for i in range(len(tests)):
temp_result = compress_string(tests[i])
if temp_result == results[i]:
print('PASS: {} returned {}'.format(tests[i], temp_result))
else:
print('FAIL: {} returned {}, should have returned {}'.format(tests[i], temp_result, results[i]))
return 0
def compress_string(string):
'''
Compresses a string such that 'AAABCCDDDD' becomes 'A3BCCD4'. Only compresses the string if it saves space ('AABBCC' stays same).
Input: string
Output: string
'''
if string is None:
return None
# Check length
s_length = len(string)
if s_length == 0:
return string
# Create compressed string
compressed = []
count = 1
base_char = string[0]
for i in range(1, s_length):
# Current char is different than last one
if string[i] != base_char:
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
# Change base_char to new character and reset count
base_char = string[i]
count = 1
# Current char is same as last one
else:
count += 1
# Append the last set of chars
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
if len(compressed) >= s_length:
return string
else:
return ''.join(compressed)
if __name__ == '__main__':
main()
|
Add string compression problem and tests.#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
tests = [None, '', 'AABBCC', 'AAABCCDDDD']
results = [None, '', 'AABBCC', 'A3BCCD4']
for i in range(len(tests)):
temp_result = compress_string(tests[i])
if temp_result == results[i]:
print('PASS: {} returned {}'.format(tests[i], temp_result))
else:
print('FAIL: {} returned {}, should have returned {}'.format(tests[i], temp_result, results[i]))
return 0
def compress_string(string):
'''
Compresses a string such that 'AAABCCDDDD' becomes 'A3BCCD4'. Only compresses the string if it saves space ('AABBCC' stays same).
Input: string
Output: string
'''
if string is None:
return None
# Check length
s_length = len(string)
if s_length == 0:
return string
# Create compressed string
compressed = []
count = 1
base_char = string[0]
for i in range(1, s_length):
# Current char is different than last one
if string[i] != base_char:
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
# Change base_char to new character and reset count
base_char = string[i]
count = 1
# Current char is same as last one
else:
count += 1
# Append the last set of chars
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
if len(compressed) >= s_length:
return string
else:
return ''.join(compressed)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add string compression problem and tests.<commit_after>#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
tests = [None, '', 'AABBCC', 'AAABCCDDDD']
results = [None, '', 'AABBCC', 'A3BCCD4']
for i in range(len(tests)):
temp_result = compress_string(tests[i])
if temp_result == results[i]:
print('PASS: {} returned {}'.format(tests[i], temp_result))
else:
print('FAIL: {} returned {}, should have returned {}'.format(tests[i], temp_result, results[i]))
return 0
def compress_string(string):
'''
Compresses a string such that 'AAABCCDDDD' becomes 'A3BCCD4'. Only compresses the string if it saves space ('AABBCC' stays same).
Input: string
Output: string
'''
if string is None:
return None
# Check length
s_length = len(string)
if s_length == 0:
return string
# Create compressed string
compressed = []
count = 1
base_char = string[0]
for i in range(1, s_length):
# Current char is different than last one
if string[i] != base_char:
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
# Change base_char to new character and reset count
base_char = string[i]
count = 1
# Current char is same as last one
else:
count += 1
# Append the last set of chars
compressed.append(base_char)
if count == 2:
compressed.append(base_char)
elif count > 2:
compressed.append(str(count))
if len(compressed) >= s_length:
return string
else:
return ''.join(compressed)
if __name__ == '__main__':
main()
|
|
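An equivalent formulation of compress_string built on itertools.groupby; it follows the same rules as the record (runs of two stay literal, longer runs become the character plus a count, and the original string wins when compression would not save space):

from itertools import groupby

def compress_string_groupby(string):
    if string is None:
        return None
    parts = []
    for char, run in groupby(string):
        count = len(list(run))
        # runs of 1 or 2 are cheaper (or equal) written out literally
        parts.append(char * count if count <= 2 else char + str(count))
    compressed = ''.join(parts)
    return compressed if len(compressed) < len(string) else string

assert compress_string_groupby('AAABCCDDDD') == 'A3BCCD4'
assert compress_string_groupby('AABBCC') == 'AABBCC'
assert compress_string_groupby('') == ''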
3061099d72364eb6d6aa36613b2a425fc3f99915
|
furl/__init__.py
|
furl/__init__.py
|
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.9'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
|
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.91'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
|
Increment version to v0.3.91 for PyPi.
|
Increment version to v0.3.91 for PyPi.
|
Python
|
unlicense
|
Gerhut/furl,lastfm/furl,penyatree/furl,guiquanz/furl
|
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.9'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
Increment version to v0.3.91 for PyPi.
|
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.91'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
|
<commit_before>#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.9'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
<commit_msg>Increment version to v0.3.91 for PyPi.<commit_after>
|
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.91'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
|
#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.9'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
Increment version to v0.3.91 for PyPi.#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.91'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
|
<commit_before>#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.9'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
<commit_msg>Increment version to v0.3.91 for PyPi.<commit_after>#
# furl - URL manipulation made simple.
#
# Arthur Grunseid
# grunseid.com
# grunseid@gmail.com
#
# License: Build Amazing Things (Unlicense)
__title__ = 'furl'
__version__ = '0.3.91'
__license__ = 'Unlicense'
__author__ = 'Arthur Grunseid'
__contact__ = 'grunseid@gmail.com'
__url__ = 'https://github.com/gruns/furl'
from .furl import *
|
55eb1c81f02a715adfe70c188985924fbeb340fd
|
demos/_videos_index_duration.py
|
demos/_videos_index_duration.py
|
import sys
from Ziggeo import Ziggeo
if(len(sys.argv) < 3):
print ("Error\n")
print ("Usage: $>python _videos_index_duration.py YOUR_API_TOKEN YOUR_PRIVATE_KEY\n")
sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
total_duration = 0.0
count_duration = 0.0
ziggeo = Ziggeo(api_token, private_key)
def indexVideos(skip=0):
global count_duration, total_duration
video_list = ziggeo.videos().index({"limit":100, "skip":skip})
for video in video_list:
if video['duration'] is not None:
total_duration += video['duration']
count_duration += 1
if(len(video_list) > 0):
indexVideos(skip+100)
pass
indexVideos(0)
print("Total Duration = {:.2f} seconds, Average Duration {:.2f} seconds.".format(total_duration, ( total_duration /count_duration )))
|
Add script to get average duration based on video list data
|
Add script to get average duration based on video list data
|
Python
|
apache-2.0
|
Ziggeo/ZiggeoPythonSdk,Ziggeo/ZiggeoPythonSdk
|
Add script to get average duration based on video list data
|
import sys
from Ziggeo import Ziggeo
if(len(sys.argv) < 3):
print ("Error\n")
print ("Usage: $>python _videos_index_duration.py YOUR_API_TOKEN YOUR_PRIVATE_KEY\n")
sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
total_duration = 0.0
count_duration = 0.0
ziggeo = Ziggeo(api_token, private_key)
def indexVideos(skip=0):
global count_duration, total_duration
video_list = ziggeo.videos().index({"limit":100, "skip":skip})
for video in video_list:
if video['duration'] is not None:
total_duration += video['duration']
count_duration += 1
if(len(video_list) > 0):
indexVideos(skip+100)
pass
indexVideos(0)
print("Total Duration = {:.2f} seconds, Average Duration {:.2f} seconds.".format(total_duration, ( total_duration /count_duration )))
|
<commit_before><commit_msg>Add script to get average duration based on video list data<commit_after>
|
import sys
from Ziggeo import Ziggeo
if(len(sys.argv) < 3):
print ("Error\n")
print ("Usage: $>python _videos_index_duration.py YOUR_API_TOKEN YOUR_PRIVATE_KEY\n")
sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
total_duration = 0.0
count_duration = 0.0
ziggeo = Ziggeo(api_token, private_key)
def indexVideos(skip=0):
global count_duration, total_duration
video_list = ziggeo.videos().index({"limit":100, "skip":skip})
for video in video_list:
if video['duration'] is not None:
total_duration += video['duration']
count_duration += 1
if(len(video_list) > 0):
indexVideos(skip+100)
pass
indexVideos(0)
print("Total Duration = {:.2f} seconds, Average Duration {:.2f} seconds.".format(total_duration, ( total_duration /count_duration )))
|
Add script to get average duration based on video list dataimport sys
from Ziggeo import Ziggeo
if(len(sys.argv) < 3):
print ("Error\n")
print ("Usage: $>python _videos_index_duration.py YOUR_API_TOKEN YOUR_PRIVATE_KEY\n")
sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
total_duration = 0.0
count_duration = 0.0
ziggeo = Ziggeo(api_token, private_key)
def indexVideos(skip=0):
global count_duration, total_duration
video_list = ziggeo.videos().index({"limit":100, "skip":skip})
for video in video_list:
if video['duration'] is not None:
total_duration += video['duration']
count_duration += 1
if(len(video_list) > 0):
indexVideos(skip+100)
pass
indexVideos(0)
print("Total Duration = {:.2f} seconds, Average Duration {:.2f} seconds.".format(total_duration, ( total_duration /count_duration )))
|
<commit_before><commit_msg>Add script to get average duration based on video list data<commit_after>import sys
from Ziggeo import Ziggeo
if(len(sys.argv) < 3):
print ("Error\n")
print ("Usage: $>python _videos_index_duration.py YOUR_API_TOKEN YOUR_PRIVATE_KEY\n")
sys.exit()
api_token = sys.argv[1]
private_key = sys.argv[2]
total_duration = 0.0
count_duration = 0.0
ziggeo = Ziggeo(api_token, private_key)
def indexVideos(skip=0):
global count_duration, total_duration
video_list = ziggeo.videos().index({"limit":100, "skip":skip})
for video in video_list:
if video['duration'] is not None:
total_duration += video['duration']
count_duration += 1
if(len(video_list) > 0):
indexVideos(skip+100)
pass
indexVideos(0)
print("Total Duration = {:.2f} seconds, Average Duration {:.2f} seconds.".format(total_duration, ( total_duration /count_duration )))
|
|
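indexVideos above recurses once per page of 100 videos, so a large enough account could hit Python's recursion limit, and the final print divides by zero when no video has a duration. An iterative sketch of the same walk, using only the API calls shown in the record:

def average_duration(ziggeo):
    total, count, skip = 0.0, 0, 0
    while True:
        page = ziggeo.videos().index({"limit": 100, "skip": skip})
        if not page:  # empty page means we have walked every video
            break
        for video in page:
            if video['duration'] is not None:
                total += video['duration']
                count += 1
        skip += 100
    return total / count if count else 0.0  # avoid ZeroDivisionError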
3154ef23b48a42e274417a28953c55b98ac3fec3
|
filters/png2jpg.py
|
filters/png2jpg.py
|
"""
Change image extensions from .png to .jpg
EXAMPLE:
>>>> echo An  | pandoc -F png2jpg.py
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Image):
elem.url = elem.url.replace('.png', '.jpg')
return elem
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
Convert .png endings to .jpg
|
Convert .png endings to .jpg
|
Python
|
bsd-3-clause
|
sergiocorreia/panflute-filters
|
Convert .png endings to .jpg
|
"""
Change image extensions from .png to .jpg
EXAMPLE:
>>>> echo An  | pandoc -F png2jpg.py
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Image):
elem.url = elem.url.replace('.png', '.jpg')
return elem
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Convert .png endings to .jpg<commit_after>
|
"""
Change image extensions from .png to .jpg
EXAMPLE:
>>>> echo An  | pandoc -F png2jpg.py
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Image):
elem.url = elem.url.replace('.png', '.jpg')
return elem
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
Convert .png endings to .jpg"""
Change image extensions from .png to .jpg
EXAMPLE:
>>>> echo An  | pandoc -F png2jpg.py
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Image):
elem.url = elem.url.replace('.png', '.jpg')
return elem
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Convert .png endings to .jpg<commit_after>"""
Change image extensions from .png to .jpg
EXAMPLE:
>>>> echo An  | pandoc -F png2jpg.py
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Image):
elem.url = elem.url.replace('.png', '.jpg')
return elem
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
|
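One caveat with str.replace in the filter above: it rewrites every '.png' in the URL, not just the extension, so 'shot.png.bak' would become 'shot.jpg.bak'. A sketch that only touches a trailing .png extension:

import os

def png_to_jpg(url):
    root, ext = os.path.splitext(url)
    return root + '.jpg' if ext == '.png' else url

assert png_to_jpg('fig.png') == 'fig.jpg'
assert png_to_jpg('shot.png.bak') == 'shot.png.bak'  # left alone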
59f9e977ee343082b9ad3e815c662d1425549e83
|
tests/test_binned_likelihood.py
|
tests/test_binned_likelihood.py
|
# from blueice.test_helpers import *
# from blueice.likelihood import BinnedLogLikelihood
#
# from scipy import stats
#
#
# def test_single_bin():
# conf = test_conf()
# conf['sources'][0]['events_per_day'] = 1
# conf['analysis_space'] = [['x', [-10, 10]]]
#
# lf = BinnedLogLikelihood(conf)
# lf.add_rate_parameter('s0')
#
# # Make a single event at x=0
# lf.set_data(np.zeros(1,
# dtype=[('x', np.float), ('source', np.int)]))
#
# assert lf() == stats.poisson(1).pmf(1)
# assert lf(s0_rate=5.4) == stats.poisson(5.4).pmf(1)
|
Add new tests (don't yet work)
|
Add new tests (don't yet work)
|
Python
|
bsd-3-clause
|
JelleAalbers/blueice
|
Add new tests (don't yet work)
|
# from blueice.test_helpers import *
# from blueice.likelihood import BinnedLogLikelihood
#
# from scipy import stats
#
#
# def test_single_bin():
# conf = test_conf()
# conf['sources'][0]['events_per_day'] = 1
# conf['analysis_space'] = [['x', [-10, 10]]]
#
# lf = BinnedLogLikelihood(conf)
# lf.add_rate_parameter('s0')
#
# # Make a single event at x=0
# lf.set_data(np.zeros(1,
# dtype=[('x', np.float), ('source', np.int)]))
#
# assert lf() == stats.poisson(1).pmf(1)
# assert lf(s0_rate=5.4) == stats.poisson(5.4).pmf(1)
|
<commit_before><commit_msg>Add new tests (don't yet work)<commit_after>
|
# from blueice.test_helpers import *
# from blueice.likelihood import BinnedLogLikelihood
#
# from scipy import stats
#
#
# def test_single_bin():
# conf = test_conf()
# conf['sources'][0]['events_per_day'] = 1
# conf['analysis_space'] = [['x', [-10, 10]]]
#
# lf = BinnedLogLikelihood(conf)
# lf.add_rate_parameter('s0')
#
# # Make a single event at x=0
# lf.set_data(np.zeros(1,
# dtype=[('x', np.float), ('source', np.int)]))
#
# assert lf() == stats.poisson(1).pmf(1)
# assert lf(s0_rate=5.4) == stats.poisson(5.4).pmf(1)
|
Add new tests (don't yet work)# from blueice.test_helpers import *
# from blueice.likelihood import BinnedLogLikelihood
#
# from scipy import stats
#
#
# def test_single_bin():
# conf = test_conf()
# conf['sources'][0]['events_per_day'] = 1
# conf['analysis_space'] = [['x', [-10, 10]]]
#
# lf = BinnedLogLikelihood(conf)
# lf.add_rate_parameter('s0')
#
# # Make a single event at x=0
# lf.set_data(np.zeros(1,
# dtype=[('x', np.float), ('source', np.int)]))
#
# assert lf() == stats.poisson(1).pmf(1)
# assert lf(s0_rate=5.4) == stats.poisson(5.4).pmf(1)
|
<commit_before><commit_msg>Add new tests (don't yet work)<commit_after># from blueice.test_helpers import *
# from blueice.likelihood import BinnedLogLikelihood
#
# from scipy import stats
#
#
# def test_single_bin():
# conf = test_conf()
# conf['sources'][0]['events_per_day'] = 1
# conf['analysis_space'] = [['x', [-10, 10]]]
#
# lf = BinnedLogLikelihood(conf)
# lf.add_rate_parameter('s0')
#
# # Make a single event at x=0
# lf.set_data(np.zeros(1,
# dtype=[('x', np.float), ('source', np.int)]))
#
# assert lf() == stats.poisson(1).pmf(1)
# assert lf(s0_rate=5.4) == stats.poisson(5.4).pmf(1)
|
|
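One possible reason these tests "don't yet work": they compare a log-likelihood object directly to a raw Poisson pmf, while a single counting bin should give the log of that pmf. A standalone sanity check of the expected value, assuming lf() returns a log-likelihood:

import numpy as np
from scipy import stats

mu, observed = 5.4, 1
log_l = stats.poisson(mu).logpmf(observed)  # what a single-bin log-likelihood should equal
assert np.isclose(log_l, np.log(stats.poisson(mu).pmf(observed)))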
7df6010f1dd4cfa00b48f2fe646d3bb0bd02f8ec
|
tests/fd_remote.py
|
tests/fd_remote.py
|
from __future__ import absolute_import
from filedes.test.base import BaseFDTestCase
from filedes.subprocess import Popen
from filedes import get_open_fds, FD
from subprocess import PIPE, STDOUT
import unittest2
import filedes
import socket
import tempfile
class RemoteFDTests(BaseFDTestCase):
def testPipe(self):
self.checkSubprocessFDs(filedes.pipe())
def testSocket(self):
s = socket.socket()
self.checkSubprocessFDs([FD(s.fileno())])
del s
def testTempFile(self):
f = tempfile.TemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def testNamedTempFile(self):
f = tempfile.NamedTemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def checkSubprocessFDs(self, check_fds, close=True):
try:
# Create a subprocess that prints and blocks waiting for input
p = Popen("echo ok; read foo", shell=True,
stdin=PIPE, stdout=PIPE, stderr=STDOUT)
try:
# Wait for the process to let us know it's alive
ready = p.stdout.read(3)
self.assertEquals(ready, "ok\n")
# Get the list of FDs of the remote process
remote_fds = get_open_fds(p.pid)
# Make sure the check_fds persisted and have an identical mode
for fd in check_fds:
self.assertIn(fd, remote_fds)
self.assertEquals(FD(int(fd), p.pid).mode, fd.mode)
# Now send some output to the remote process to unblock it
p.stdin.write("ok\n")
p.stdin.flush()
# Wait for it to shutdown
self.assertEquals(p.wait(), 0)
finally:
# Popen does not close PIPE fds on process shutdown
# automatically, even if there's no data in it. Since the
# exception context is propagated to the test cases' tearDown,
# the popen's pipes will show up as a leak
if p.poll() is None:
p.kill()
self.assertEquals(p.wait(), -9)
del p
finally:
if close:
for fd in check_fds:
fd.close()
if __name__ == '__main__':
unittest2.main()
|
Add unittest to verify remote stat on misc fd types
|
Add unittest to verify remote stat on misc fd types
Fixes #8
|
Python
|
isc
|
fmoo/python-filedes,fmoo/python-filedes
|
Add unittest to verify remote stat on misc fd types
Fixes #8
|
from __future__ import absolute_import
from filedes.test.base import BaseFDTestCase
from filedes.subprocess import Popen
from filedes import get_open_fds, FD
from subprocess import PIPE, STDOUT
import unittest2
import filedes
import socket
import tempfile
class RemoteFDTests(BaseFDTestCase):
def testPipe(self):
self.checkSubprocessFDs(filedes.pipe())
def testSocket(self):
s = socket.socket()
self.checkSubprocessFDs([FD(s.fileno())])
del s
def testTempFile(self):
f = tempfile.TemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def testNamedTempFile(self):
f = tempfile.NamedTemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def checkSubprocessFDs(self, check_fds, close=True):
try:
# Create a subprocess that prints and blocks waiting for input
p = Popen("echo ok; read foo", shell=True,
stdin=PIPE, stdout=PIPE, stderr=STDOUT)
try:
# Wait for the process to let us know it's alive
ready = p.stdout.read(3)
self.assertEquals(ready, "ok\n")
# Get the list of FDs of the remote process
remote_fds = get_open_fds(p.pid)
# Make sure the check_fds persisted and have an identical mode
for fd in check_fds:
self.assertIn(fd, remote_fds)
self.assertEquals(FD(int(fd), p.pid).mode, fd.mode)
# Now send some output to the remote process to unblock it
p.stdin.write("ok\n")
p.stdin.flush()
# Wait for it to shutdown
self.assertEquals(p.wait(), 0)
finally:
# Popen does not close PIPE fds on process shutdown
# automatically, even if there's no data in it. Since the
# exception context is propagated to the test cases' tearDown,
# the popen's pipes will show up as a leak
if p.poll() is None:
p.kill()
self.assertEquals(p.wait(), -9)
del p
finally:
if close:
for fd in check_fds:
fd.close()
if __name__ == '__main__':
unittest2.main()
|
<commit_before><commit_msg>Add unittest to verify remote stat on misc fd types
Fixes #8<commit_after>
|
from __future__ import absolute_import
from filedes.test.base import BaseFDTestCase
from filedes.subprocess import Popen
from filedes import get_open_fds, FD
from subprocess import PIPE, STDOUT
import unittest2
import filedes
import socket
import tempfile
class RemoteFDTests(BaseFDTestCase):
def testPipe(self):
self.checkSubprocessFDs(filedes.pipe())
def testSocket(self):
s = socket.socket()
self.checkSubprocessFDs([FD(s.fileno())])
del s
def testTempFile(self):
f = tempfile.TemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def testNamedTempFile(self):
f = tempfile.NamedTemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def checkSubprocessFDs(self, check_fds, close=True):
try:
# Create a subprocess that prints and blocks waiting for input
p = Popen("echo ok; read foo", shell=True,
stdin=PIPE, stdout=PIPE, stderr=STDOUT)
try:
# Wait for the process to let us know it's alive
ready = p.stdout.read(3)
self.assertEquals(ready, "ok\n")
# Get the list of FDs of the remote process
remote_fds = get_open_fds(p.pid)
# Make sure the check_fds persisted and have an identical mode
for fd in check_fds:
self.assertIn(fd, remote_fds)
self.assertEquals(FD(int(fd), p.pid).mode, fd.mode)
# Now send some output to the remote process to unblock it
p.stdin.write("ok\n")
p.stdin.flush()
# Wait for it to shutdown
self.assertEquals(p.wait(), 0)
finally:
# Popen does not close PIPE fds on process shutdown
# automatically, even if there's no data in it. Since the
# exception context is propagated to the test cases' tearDown,
# the popen's pipes will show up as a leak
if p.poll() is None:
p.kill()
self.assertEquals(p.wait(), -9)
del p
finally:
if close:
for fd in check_fds:
fd.close()
if __name__ == '__main__':
unittest2.main()
|
Add unittest to verify remote stat on misc fd types
Fixes #8from __future__ import absolute_import
from filedes.test.base import BaseFDTestCase
from filedes.subprocess import Popen
from filedes import get_open_fds, FD
from subprocess import PIPE, STDOUT
import unittest2
import filedes
import socket
import tempfile
class RemoteFDTests(BaseFDTestCase):
def testPipe(self):
self.checkSubprocessFDs(filedes.pipe())
def testSocket(self):
s = socket.socket()
self.checkSubprocessFDs([FD(s.fileno())])
del s
def testTempFile(self):
f = tempfile.TemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def testNamedTempFile(self):
f = tempfile.NamedTemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def checkSubprocessFDs(self, check_fds, close=True):
try:
# Create a subprocess that prints and blocks waiting for input
p = Popen("echo ok; read foo", shell=True,
stdin=PIPE, stdout=PIPE, stderr=STDOUT)
try:
# Wait for the process to let us know it's alive
ready = p.stdout.read(3)
self.assertEquals(ready, "ok\n")
# Get the list of FDs of the remote process
remote_fds = get_open_fds(p.pid)
# Make sure the check_fds persisted and have an identical mode
for fd in check_fds:
self.assertIn(fd, remote_fds)
self.assertEquals(FD(int(fd), p.pid).mode, fd.mode)
# Now send some output to the remote process to unblock it
p.stdin.write("ok\n")
p.stdin.flush()
# Wait for it to shutdown
self.assertEquals(p.wait(), 0)
finally:
# Popen does not close PIPE fds on process shutdown
# automatically, even if there's no data in it. Since the
# exception context is propagated to the test cases' tearDown,
# the popen's pipes will show up as a leak
if p.poll() is None:
p.kill()
self.assertEquals(p.wait(), -9)
del p
finally:
if close:
for fd in check_fds:
fd.close()
if __name__ == '__main__':
unittest2.main()
|
<commit_before><commit_msg>Add unittest to verify remote stat on misc fd types
Fixes #8<commit_after>from __future__ import absolute_import
from filedes.test.base import BaseFDTestCase
from filedes.subprocess import Popen
from filedes import get_open_fds, FD
from subprocess import PIPE, STDOUT
import unittest2
import filedes
import socket
import tempfile
class RemoteFDTests(BaseFDTestCase):
def testPipe(self):
self.checkSubprocessFDs(filedes.pipe())
def testSocket(self):
s = socket.socket()
self.checkSubprocessFDs([FD(s.fileno())])
del s
def testTempFile(self):
f = tempfile.TemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def testNamedTempFile(self):
f = tempfile.NamedTemporaryFile()
fd = FD(f.fileno())
# tempfile APIs set cloexec = True for added security
fd.set_cloexec(False)
self.checkSubprocessFDs([fd], close=False)
del f
def checkSubprocessFDs(self, check_fds, close=True):
try:
# Create a subprocess that prints and blocks waiting for input
p = Popen("echo ok; read foo", shell=True,
stdin=PIPE, stdout=PIPE, stderr=STDOUT)
try:
# Wait for the process to let us know it's alive
ready = p.stdout.read(3)
self.assertEquals(ready, "ok\n")
# Get the list of FDs of the remote process
remote_fds = get_open_fds(p.pid)
# Make sure the check_fds persisted and have an identical mode
for fd in check_fds:
self.assertIn(fd, remote_fds)
self.assertEquals(FD(int(fd), p.pid).mode, fd.mode)
# Now send some output to the remote process to unblock it
p.stdin.write("ok\n")
p.stdin.flush()
# Wait for it to shutdown
self.assertEquals(p.wait(), 0)
finally:
# Popen does not close PIPE fds on process shutdown
# automatically, even if there's no data in it. Since the
# exception context is propagated to the test cases' tearDown,
# the popen's pipes will show up as a leak
if p.poll() is None:
p.kill()
self.assertEquals(p.wait(), -9)
del p
finally:
if close:
for fd in check_fds:
fd.close()
if __name__ == '__main__':
unittest2.main()
|
|
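As the module's own comments note, tempfile opens descriptors with close-on-exec set, hence the set_cloexec(False) calls. The same toggle can be sketched with the stdlib alone, assuming a POSIX system (fcntl does not exist on Windows):
import fcntl
import tempfile
f = tempfile.TemporaryFile()
flags = fcntl.fcntl(f.fileno(), fcntl.F_GETFD)
fcntl.fcntl(f.fileno(), fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC)  # clear close-on-exec
assert not fcntl.fcntl(f.fileno(), fcntl.F_GETFD) & fcntl.FD_CLOEXEC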
c2506fdc71f1dcff2e3455c668e78ad6b7d5d94b
|
scripts/fenix/fenix_download.py
|
scripts/fenix/fenix_download.py
|
# python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse
if __name__ == '__main__':
# TODO: argparse
parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
parser.add_argument('login', type=str, help='path to fenix login credentials file (check login.json for format)')
parser.add_argument('url', type=str, help='url from where to download the files from')
parser.add_argument('-e', '--extension', type=str, default='pdf', help='file extensions to download, without the leading "." (* for all, default is "pdf")')
parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
|
Add initial structure for fenix downloader
|
Add initial structure for fenix downloader
|
Python
|
mit
|
iluxonchik/python-general-repo
|
Add initial structure for fenix downloader
|
# python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse
if __name__ == '__main__':
# TODO: argparse
parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
parser.add_argument('login', type=str, help='path to fenix login credentials file (check login.json for format)')
parser.add_argument('url', type=str, help='url from where to download the files from')
parser.add_argument('-e', '--extension', type=str, default='pdf', help='file extensions to download, without the leading "." (* for all, default is "pdf")')
parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
|
<commit_before><commit_msg>Add initial structure for fenix downloader<commit_after>
|
# python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse
if __name__ == '__main__':
# TODO: argparse
parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
parser.add_argument('login', type=str, help='path to fenix login credentials file (check login.json for format)')
parser.add_argument('url', type=str, help='url from where to download the files from')
parser.add_argument('-e', '--extension', type=str, default='pdf', help='file extensions to download, without the leading "." (* for all, default is "pdf")')
parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
|
Add initial structure for fenix downloader# python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse
if __name__ == '__main__':
# TODO: argparse
parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
parser.add_argument('login', type=str, help='path to fenix login credentials file (check login.json for format)')
parser.add_argument('url', type=str, help='url from where to download the files from')
parser.add_argument('-e', '--extension', type=str, default='pdf', help='file extensions to download, without the leading "." (* for all, default is "pdf")')
parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
|
<commit_before><commit_msg>Add initial structure for fenix downloader<commit_after># python fenix_download login_file <url> -e <file_extension> (DEFAULT = pdf) -d <download_directory>
#
#
from fenix import Fenix
import argparse
if __name__ == '__main__':
# TODO: argparse
parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
parser.add_argument('login', type=str, help='path to fenix login credentials file (check login.json for format)')
parser.add_argument('url', type=str, help='url from where to download the files from')
parser.add_argument('-e', '--extension', type=str, default='pdf', help='file extensions to download, without the leading "." (* for all, default is "pdf")')
parser.add_argument('-d', '--directory', type=str, default='download', help='download directory')
|
|
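The argparse skeleton above never calls parse_args(); a hedged sketch of the missing step, with made-up inputs and without touching the project's Fenix class:
import argparse
parser = argparse.ArgumentParser(description='Download files from Fenix Edu pages')
parser.add_argument('login', type=str)
parser.add_argument('url', type=str)
parser.add_argument('-e', '--extension', type=str, default='pdf')
parser.add_argument('-d', '--directory', type=str, default='download')
args = parser.parse_args(['login.json', 'https://fenix.example/page'])  # hypothetical inputs
assert args.extension == 'pdf' and args.directory == 'download'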
a5f0544bf0ce88ecfca515f0db344fffb90c8d8e
|
altair/vegalite/v2/examples/percentage_of_total.py
|
altair/vegalite/v2/examples/percentage_of_total.py
|
"""
Calculating Percentage of Total
-------------------------------
This chart demonstrates how to use a window transform to display data values
as a percentage of total values.
"""
import altair as alt
import pandas as pd
activities = pd.DataFrame({'Activity': ['Sleeping', 'Eating', 'TV', 'Work', 'Exercise'],
'Time': [8, 2, 4, 8, 2]})
alt.Chart(activities).mark_bar().encode(
x='PercentOfTotal:Q',
y='Activity:N'
).transform_window(
window=[alt.WindowFieldDef(op='sum', field='Time', **{'as': 'TotalTime'})],
frame=[None, None]
).transform_calculate(
PercentOfTotal="datum.Time / datum.TotalTime * 100"
)
|
Add simple window transform example
|
Add simple window transform example
|
Python
|
bsd-3-clause
|
altair-viz/altair,ellisonbg/altair,jakevdp/altair
|
Add simple window transform example
|
"""
Calculating Percentage of Total
-------------------------------
This chart demonstrates how to use a window transform to display data values
as a percentage of total values.
"""
import altair as alt
import pandas as pd
activities = pd.DataFrame({'Activity': ['Sleeping', 'Eating', 'TV', 'Work', 'Exercise'],
'Time': [8, 2, 4, 8, 2]})
alt.Chart(activities).mark_bar().encode(
x='PercentOfTotal:Q',
y='Activity:N'
).transform_window(
window=[alt.WindowFieldDef(op='sum', field='Time', **{'as': 'TotalTime'})],
frame=[None, None]
).transform_calculate(
PercentOfTotal="datum.Time / datum.TotalTime * 100"
)
|
<commit_before><commit_msg>Add simple window transform example<commit_after>
|
"""
Calculating Percentage of Total
-------------------------------
This chart demonstrates how to use a window transform to display data values
as a percentage of total values.
"""
import altair as alt
import pandas as pd
activities = pd.DataFrame({'Activity': ['Sleeping', 'Eating', 'TV', 'Work', 'Exercise'],
'Time': [8, 2, 4, 8, 2]})
alt.Chart(activities).mark_bar().encode(
x='PercentOfTotal:Q',
y='Activity:N'
).transform_window(
window=[alt.WindowFieldDef(op='sum', field='Time', **{'as': 'TotalTime'})],
frame=[None, None]
).transform_calculate(
PercentOfTotal="datum.Time / datum.TotalTime * 100"
)
|
Add simple window transform example"""
Calculating Percentage of Total
-------------------------------
This chart demonstrates how to use a window transform to display data values
as a percentage of total values.
"""
import altair as alt
import pandas as pd
activities = pd.DataFrame({'Activity': ['Sleeping', 'Eating', 'TV', 'Work', 'Exercise'],
'Time': [8, 2, 4, 8, 2]})
alt.Chart(activities).mark_bar().encode(
x='PercentOfTotal:Q',
y='Activity:N'
).transform_window(
window=[alt.WindowFieldDef(op='sum', field='Time', **{'as': 'TotalTime'})],
frame=[None, None]
).transform_calculate(
PercentOfTotal="datum.Time / datum.TotalTime * 100"
)
|
<commit_before><commit_msg>Add simple window transform example<commit_after>"""
Calculating Percentage of Total
-------------------------------
This chart demonstrates how to use a window transform to display data values
as a percentage of total values.
"""
import altair as alt
import pandas as pd
activities = pd.DataFrame({'Activity': ['Sleeping', 'Eating', 'TV', 'Work', 'Exercise'],
'Time': [8, 2, 4, 8, 2]})
alt.Chart(activities).mark_bar().encode(
x='PercentOfTotal:Q',
y='Activity:N'
).transform_window(
window=[alt.WindowFieldDef(op='sum', field='Time', **{'as': 'TotalTime'})],
frame=[None, None]
).transform_calculate(
PercentOfTotal="datum.Time / datum.TotalTime * 100"
)
|
|
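The window transform sums Time over the whole frame, so each bar is Time / sum(Time) * 100; the total here is 8 + 2 + 4 + 8 + 2 = 24, so Sleeping comes out at 8 / 24 * 100, roughly 33.3. The same numbers fall out of plain pandas:
import pandas as pd
activities = pd.DataFrame({'Activity': ['Sleeping', 'Eating', 'TV', 'Work', 'Exercise'],
                           'Time': [8, 2, 4, 8, 2]})
pct = activities['Time'] / activities['Time'].sum() * 100
assert abs(pct.iloc[0] - 100 * 8 / 24) < 1e-9  # Sleeping share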
9f27cf9f0669f98694e7751eb9d22064d24bdb3f
|
services/tests/test_indexing.py
|
services/tests/test_indexing.py
|
from django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
def setUp(self):
self.service_ar = ServiceFactory(
name_ar='Arabic', name_en='', name_fr='',
description_ar='language-of-Egypt',
status=Service.STATUS_CURRENT
)
self.service_ar.save()
self.service_en = ServiceFactory(
name_en='English', name_ar='', name_fr='',
description_en='language-of-Australia',
status=Service.STATUS_CURRENT
)
self.service_en.save()
self.service_fr = ServiceFactory(
name_fr='French', name_ar='', name_en='',
description_fr='language-of-France',
status=Service.STATUS_CURRENT
)
self.service_fr.save()
self.rejected_service_fr = ServiceFactory(
name_fr='InactiveParis', name_ar='', name_en='',
status=Service.STATUS_REJECTED
)
self.rejected_service_fr.save()
def test_querysets(self):
index = ServiceIndex()
self.assertIn(self.service_ar, index.get_index_queryset('ar'))
self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
self.assertIn(self.service_fr, index.get_index_queryset('fr'))
self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
self.assertNotIn(self.rejected_service_fr, index.get_index_queryset('fr'))
def test_search_data(self):
index = ServiceIndex()
ar_data = index.get_search_data(self.service_ar, 'ar', None)
self.assertIn('Egypt', ar_data)
en_data = index.get_search_data(self.service_en, 'en', None)
self.assertIn('Australia', en_data)
|
Add testcase for search indexing of Service
|
Add testcase for search indexing of Service
|
Python
|
bsd-3-clause
|
theirc/ServiceInfo,theirc/ServiceInfo,theirc/ServiceInfo,theirc/ServiceInfo
|
Add testcase for search indexing of Service
|
from django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
def setUp(self):
self.service_ar = ServiceFactory(
name_ar='Arabic', name_en='', name_fr='',
description_ar='language-of-Egypt',
status=Service.STATUS_CURRENT
)
self.service_ar.save()
self.service_en = ServiceFactory(
name_en='English', name_ar='', name_fr='',
description_en='language-of-Australia',
status=Service.STATUS_CURRENT
)
self.service_en.save()
self.service_fr = ServiceFactory(
name_fr='French', name_ar='', name_en='',
description_fr='language-of-France',
status=Service.STATUS_CURRENT
)
self.service_fr.save()
self.rejected_service_fr = ServiceFactory(
name_fr='InactiveParis', name_ar='', name_en='',
status=Service.STATUS_REJECTED
)
self.rejected_service_fr.save()
def test_querysets(self):
index = ServiceIndex()
self.assertIn(self.service_ar, index.get_index_queryset('ar'))
self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
self.assertIn(self.service_fr, index.get_index_queryset('fr'))
self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
self.assertNotIn(self.rejected_service_fr, index.get_index_queryset('fr'))
def test_search_data(self):
index = ServiceIndex()
ar_data = index.get_search_data(self.service_ar, 'ar', None)
self.assertIn('Egypt', ar_data)
en_data = index.get_search_data(self.service_en, 'en', None)
self.assertIn('Australia', en_data)
|
<commit_before><commit_msg>Add testcase for search indexing of Service<commit_after>
|
from django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
def setUp(self):
self.service_ar = ServiceFactory(
name_ar='Arabic', name_en='', name_fr='',
description_ar='language-of-Egypt',
status=Service.STATUS_CURRENT
)
self.service_ar.save()
self.service_en = ServiceFactory(
name_en='English', name_ar='', name_fr='',
description_en='language-of-Australia',
status=Service.STATUS_CURRENT
)
self.service_en.save()
self.service_fr = ServiceFactory(
name_fr='French', name_ar='', name_en='',
description_fr='language-of-France',
status=Service.STATUS_CURRENT
)
self.service_fr.save()
self.rejected_service_fr = ServiceFactory(
name_fr='InactiveParis', name_ar='', name_en='',
status=Service.STATUS_REJECTED
)
self.rejected_service_fr.save()
def test_querysets(self):
index = ServiceIndex()
self.assertIn(self.service_ar, index.get_index_queryset('ar'))
self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
self.assertIn(self.service_fr, index.get_index_queryset('fr'))
self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
self.assertNotIn(self.rejected_service_fr, index.get_index_queryset('fr'))
def test_search_data(self):
index = ServiceIndex()
ar_data = index.get_search_data(self.service_ar, 'ar', None)
self.assertIn('Egypt', ar_data)
en_data = index.get_search_data(self.service_en, 'en', None)
self.assertIn('Australia', en_data)
|
Add testcase for search indexing of Servicefrom django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
def setUp(self):
self.service_ar = ServiceFactory(
name_ar='Arabic', name_en='', name_fr='',
description_ar='language-of-Egypt',
status=Service.STATUS_CURRENT
)
self.service_ar.save()
self.service_en = ServiceFactory(
name_en='English', name_ar='', name_fr='',
description_en='language-of-Australia',
status=Service.STATUS_CURRENT
)
self.service_en.save()
self.service_fr = ServiceFactory(
name_fr='French', name_ar='', name_en='',
description_fr='language-of-France',
status=Service.STATUS_CURRENT
)
self.service_fr.save()
self.rejected_service_fr = ServiceFactory(
name_fr='InactiveParis', name_ar='', name_en='',
status=Service.STATUS_REJECTED
)
self.rejected_service_fr.save()
def test_querysets(self):
index = ServiceIndex()
self.assertIn(self.service_ar, index.get_index_queryset('ar'))
self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
self.assertIn(self.service_fr, index.get_index_queryset('fr'))
self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
self.assertNotIn(self.rejected_service_fr, index.get_index_queryset('fr'))
def test_search_data(self):
index = ServiceIndex()
ar_data = index.get_search_data(self.service_ar, 'ar', None)
self.assertIn('Egypt', ar_data)
en_data = index.get_search_data(self.service_en, 'en', None)
self.assertIn('Australia', en_data)
|
<commit_before><commit_msg>Add testcase for search indexing of Service<commit_after>from django.test import TestCase
from services.tests.factories import ServiceFactory
from services.models import Service
from services.search_indexes import ServiceIndex
class IndexingTest(TestCase):
def setUp(self):
self.service_ar = ServiceFactory(
name_ar='Arabic', name_en='', name_fr='',
description_ar='language-of-Egypt',
status=Service.STATUS_CURRENT
)
self.service_ar.save()
self.service_en = ServiceFactory(
name_en='English', name_ar='', name_fr='',
description_en='language-of-Australia',
status=Service.STATUS_CURRENT
)
self.service_en.save()
self.service_fr = ServiceFactory(
name_fr='French', name_ar='', name_en='',
description_fr='language-of-France',
status=Service.STATUS_CURRENT
)
self.service_fr.save()
self.rejected_service_fr = ServiceFactory(
name_fr='InactiveParis', name_ar='', name_en='',
status=Service.STATUS_REJECTED
)
self.rejected_service_fr.save()
def test_querysets(self):
index = ServiceIndex()
self.assertIn(self.service_ar, index.get_index_queryset('ar'))
self.assertNotIn(self.service_ar, index.get_index_queryset('en'))
self.assertIn(self.service_fr, index.get_index_queryset('fr'))
self.assertNotIn(self.service_fr, index.get_index_queryset('ar'))
self.assertNotIn(self.rejected_service_fr, index.get_index_queryset('fr'))
def test_search_data(self):
index = ServiceIndex()
ar_data = index.get_search_data(self.service_ar, 'ar', None)
self.assertIn('Egypt', ar_data)
en_data = index.get_search_data(self.service_en, 'en', None)
self.assertIn('Australia', en_data)
|
|
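The assertions above imply that ServiceIndex keeps only current services with a non-empty name in the indexed language. A model-free sketch of that filter pattern; qs and the status/name_<lang> fields are assumptions standing in for the Service queryset, not the project's actual implementation:
def get_index_queryset(qs, language):
    # assumed shape only: any Django queryset with status and name_<lang> fields
    return qs.filter(status='current').exclude(**{'name_%s' % language: ''})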
729cdbfc9013bbc34bb44cc10f0f8443d604f1be
|
time_series_rnn.py
|
time_series_rnn.py
|
# Predict time series
import tensorflow as tf
import numpy as np
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:,:-1].reshape(-1, n_steps, 1), ys[:,1:].reshape(-1, n_steps, 1)
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
# Wrap cell in wrapper - want single output value at each step
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32, swap_memory=True)
learning_rate = 0.001
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
|
Add code for RNN time series analysis
|
Add code for RNN time series analysis
Uses OutputProjectionWrapper
|
Python
|
mit
|
KT12/hands_on_machine_learning
|
Add code for RNN time series analysis
Uses OutputProjectionWrapper
|
# Predict time series
import tensorflow as tf
import numpy as np
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:,:-1].reshape(-1, n_steps, 1), ys[:,1:].reshape(-1, n_steps, 1)
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
# Wrap cell in wrapper - want single output value at each step
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32, swap_memory=True)
learning_rate = 0.001
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
|
<commit_before><commit_msg>Add code for RNN time series analysis
Uses OutputProjectionWrapper<commit_after>
|
# Predict time series
import tensorflow as tf
import numpy as np
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:,:-1].reshape(-1, n_steps, 1), ys[:,1:].reshape(-1, n_steps, 1)
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
# Wrap cell in wrapper - want single output value at each step
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32, swap_memory=True)
learning_rate = 0.001
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
|
Add code for RNN time series analysis
Uses OutputProjectionWrapper# Predict time series
import tensorflow as tf
import numpy as np
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:,:-1].reshape(-1, n_steps, 1), ys[:,1:].reshape(-1, n_steps, 1)
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
# Wrap cell in wrapper - want single output value at each step
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32, swap_memory=True)
learning_rate = 0.001
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
|
<commit_before><commit_msg>Add code for RNN time series analysis
Uses OutputProjectionWrapper<commit_after># Predict time series
import tensorflow as tf
import numpy as np
# Create time series
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:,:-1].reshape(-1, n_steps, 1), ys[:,1:].reshape(-1, n_steps, 1)
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
# cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
# Wrap cell in wrapper - want single output value at each step
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32, swap_memory=True)
learning_rate = 0.001
loss = tf.reduce_sum(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
n_iterations = 1000
batch_size = 50
with tf.Session() as sess:
init.run()
for k in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if k % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(k, "\tMSE: ", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
|
|
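The input pipeline above is framework-free, so it can be sanity-checked with numpy before any TensorFlow runs; this sketch restates the generator and verifies that each target sequence is the input shifted one step ahead:
import numpy as np
t_min, t_max, resolution = 0, 30, 0.1
def time_series(t):
    return t * np.sin(t) / 3 + 2 * np.sin(t * 5)
def next_batch(batch_size, n_steps):
    t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
    Ts = t0 + np.arange(0., n_steps + 1) * resolution
    ys = time_series(Ts)
    return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1)
X_batch, y_batch = next_batch(50, 20)
assert X_batch.shape == y_batch.shape == (50, 20, 1)
assert np.allclose(X_batch[:, 1:, 0], y_batch[:, :-1, 0])  # targets lead inputs by one step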
1ec1bede9f5451aeef09d250ad4542bfb0cedb3d
|
tests/pytests/functional/modules/test_user.py
|
tests/pytests/functional/modules/test_user.py
|
import pathlib
import pytest
from saltfactories.utils import random_string
pytestmark = [
pytest.mark.skip_if_not_root,
pytest.mark.destructive_test,
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def user(modules):
return modules.user
@pytest.fixture
def username(user):
_username = random_string("test-account-", uppercase=False)
try:
yield _username
finally:
try:
user.delete(_username, remove=True, force=True)
except Exception: # pylint: disable=broad-except
# The point here is just system cleanup. It can fail if no account was created
pass
@pytest.fixture
def account(username):
with pytest.helpers.create_account(username=username) as account:
yield account
def test_add(user, username):
ret = user.add(username)
assert ret is True
def test_delete(user, account):
ret = user.delete(account.username)
assert ret is True
@pytest.mark.skip_on_windows(reason="The windows user module does not support 'remove'")
@pytest.mark.parametrize("remove", [False, True])
def test_delete_remove(user, account, remove):
"""
Test deleting a user from the system and passing ``remove`` to the call
"""
user_info = user.info(account.username)
ret = user.delete(account.username, remove=remove)
assert ret is True
if remove is True:
assert pathlib.Path(user_info["home"]).exists() is False
else:
assert pathlib.Path(user_info["home"]).exists() is True
|
Add functional tests for the salt user module
|
Add functional tests for the salt user module
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add functional tests for the salt user module
|
import pathlib
import pytest
from saltfactories.utils import random_string
pytestmark = [
pytest.mark.skip_if_not_root,
pytest.mark.destructive_test,
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def user(modules):
return modules.user
@pytest.fixture
def username(user):
_username = random_string("test-account-", uppercase=False)
try:
yield _username
finally:
try:
user.delete(_username, remove=True, force=True)
except Exception: # pylint: disable=broad-except
# The point here is just system cleanup. It can fail if no account was created
pass
@pytest.fixture
def account(username):
with pytest.helpers.create_account(username=username) as account:
yield account
def test_add(user, username):
ret = user.add(username)
assert ret is True
def test_delete(user, account):
ret = user.delete(account.username)
assert ret is True
@pytest.mark.skip_on_windows(reason="The windows user module does not support 'remove'")
@pytest.mark.parametrize("remove", [False, True])
def test_delete_remove(user, account, remove):
"""
Test deleting a user from the system and passing ``remove`` to the call
"""
user_info = user.info(account.username)
ret = user.delete(account.username, remove=remove)
assert ret is True
if remove is True:
assert pathlib.Path(user_info["home"]).exists() is False
else:
assert pathlib.Path(user_info["home"]).exists() is True
|
<commit_before><commit_msg>Add functional tests for the salt user module<commit_after>
|
import pathlib
import pytest
from saltfactories.utils import random_string
pytestmark = [
pytest.mark.skip_if_not_root,
pytest.mark.destructive_test,
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def user(modules):
return modules.user
@pytest.fixture
def username(user):
_username = random_string("test-account-", uppercase=False)
try:
yield _username
finally:
try:
user.delete(_username, remove=True, force=True)
except Exception: # pylint: disable=broad-except
# The point here is just system cleanup. It can fail if no account was created
pass
@pytest.fixture
def account(username):
with pytest.helpers.create_account(username=username) as account:
yield account
def test_add(user, username):
ret = user.add(username)
assert ret is True
def test_delete(user, account):
ret = user.delete(account.username)
assert ret is True
@pytest.mark.skip_on_windows(reason="The windows user module does not support 'remove'")
@pytest.mark.parametrize("remove", [False, True])
def test_delete_remove(user, account, remove):
"""
Test deleting a user from the system and passing ``remove`` to the call
"""
user_info = user.info(account.username)
ret = user.delete(account.username, remove=remove)
assert ret is True
if remove is True:
assert pathlib.Path(user_info["home"]).exists() is False
else:
assert pathlib.Path(user_info["home"]).exists() is True
|
Add functional tests for the salt user moduleimport pathlib
import pytest
from saltfactories.utils import random_string
pytestmark = [
pytest.mark.skip_if_not_root,
pytest.mark.destructive_test,
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def user(modules):
return modules.user
@pytest.fixture
def username(user):
_username = random_string("test-account-", uppercase=False)
try:
yield _username
finally:
try:
user.delete(_username, remove=True, force=True)
except Exception: # pylint: disable=broad-except
# The point here is just system cleanup. It can fail if no account was created
pass
@pytest.fixture
def account(username):
with pytest.helpers.create_account(username=username) as account:
yield account
def test_add(user, username):
ret = user.add(username)
assert ret is True
def test_delete(user, account):
ret = user.delete(account.username)
assert ret is True
@pytest.mark.skip_on_windows(reason="The windows user module does not support 'remove'")
@pytest.mark.parametrize("remove", [False, True])
def test_delete_remove(user, account, remove):
"""
Test deleting a user from the system and passing ``remove`` to the call
"""
user_info = user.info(account.username)
ret = user.delete(account.username, remove=remove)
assert ret is True
if remove is True:
assert pathlib.Path(user_info["home"]).exists() is False
else:
assert pathlib.Path(user_info["home"]).exists() is True
|
<commit_before><commit_msg>Add functional tests for the salt user module<commit_after>import pathlib
import pytest
from saltfactories.utils import random_string
pytestmark = [
pytest.mark.skip_if_not_root,
pytest.mark.destructive_test,
pytest.mark.windows_whitelisted,
]
@pytest.fixture(scope="module")
def user(modules):
return modules.user
@pytest.fixture
def username(user):
_username = random_string("test-account-", uppercase=False)
try:
yield _username
finally:
try:
user.delete(_username, remove=True, force=True)
except Exception: # pylint: disable=broad-except
# The point here is just system cleanup. It can fail if no account was created
pass
@pytest.fixture
def account(username):
with pytest.helpers.create_account(username=username) as account:
yield account
def test_add(user, username):
ret = user.add(username)
assert ret is True
def test_delete(user, account):
ret = user.delete(account.username)
assert ret is True
@pytest.mark.skip_on_windows(reason="The windows user module does not support 'remove'")
@pytest.mark.parametrize("remove", [False, True])
def test_delete_remove(user, account, remove):
"""
Test deleting a user from the system and passing ``remove`` to the call
"""
user_info = user.info(account.username)
ret = user.delete(account.username, remove=remove)
assert ret is True
if remove is True:
assert pathlib.Path(user_info["home"]).exists() is False
else:
assert pathlib.Path(user_info["home"]).exists() is True
|
|
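The username fixture above leans on yield-fixture teardown so cleanup runs even when a test fails. The same pattern in isolation, with a dict as a hypothetical stand-in for the created account:
import pytest
@pytest.fixture
def resource():
    handle = {'created': True}
    try:
        yield handle
    finally:
        handle.clear()  # teardown runs even if the test body raises
def test_resource(resource):
    assert resource['created']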
a2ab40bd7da2131ff6b9d502cc3dde1f6a8531e6
|
src/legacy_data_export.py
|
src/legacy_data_export.py
|
import json
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = int(sys.argv[1])
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(
MAX_TEAMS).all()
distances = list()
print "fetch distances..."
for (idx, team_from) in enumerate(teams):
location_from = MapPoint.from_team(team_from)
for team_to in teams[idx + 1:]:
location_to = MapPoint.from_team(team_to)
dist = int(simple_distance(location_from, location_to) * 1000)
distances.append({"src": str(team_from.id), "dst": str(team_to.id), "value": dist, "text": str(dist)})
distances.append({"src": str(team_to.id), "dst": str(team_from.id), "value": dist, "text": str(dist)})
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distances, f)
|
Build a legacy distance export
|
Build a legacy distance export
|
Python
|
bsd-3-clause
|
janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system
|
Build a legacy distance export
|
import json
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = int(sys.argv[1])
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(
MAX_TEAMS).all()
distances = list()
print "fetch distances..."
for (idx, team_from) in enumerate(teams):
location_from = MapPoint.from_team(team_from)
for team_to in teams[idx + 1:]:
location_to = MapPoint.from_team(team_to)
dist = int(simple_distance(location_from, location_to) * 1000)
distances.append({"src": str(team_from.id), "dst": str(team_to.id), "value": dist, "text": str(dist)})
distances.append({"src": str(team_to.id), "dst": str(team_from.id), "value": dist, "text": str(dist)})
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distances, f)
|
<commit_before><commit_msg>Build a legacy distance export<commit_after>
|
import json
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = int(sys.argv[1])
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(
MAX_TEAMS).all()
distances = list()
print "fetch distances..."
for (idx, team_from) in enumerate(teams):
location_from = MapPoint.from_team(team_from)
for team_to in teams[idx + 1:]:
location_to = MapPoint.from_team(team_to)
dist = int(simple_distance(location_from, location_to) * 1000)
distances.append({"src": str(team_from.id), "dst": str(team_to.id), "value": dist, "text": str(dist)})
distances.append({"src": str(team_to.id), "dst": str(team_from.id), "value": dist, "text": str(dist)})
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distances, f)
|
Build a legacy distance exportimport json
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = int(sys.argv[1])
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(
MAX_TEAMS).all()
distances = list()
print "fetch distances..."
for (idx, team_from) in enumerate(teams):
location_from = MapPoint.from_team(team_from)
for team_to in teams[idx + 1:]:
location_to = MapPoint.from_team(team_to)
dist = int(simple_distance(location_from, location_to) * 1000)
distances.append({"src": str(team_from.id), "dst": str(team_to.id), "value": dist, "text": str(dist)})
distances.append({"src": str(team_to.id), "dst": str(team_from.id), "value": dist, "text": str(dist)})
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distances, f)
|
<commit_before><commit_msg>Build a legacy distance export<commit_after>import json
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = int(sys.argv[1])
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(
MAX_TEAMS).all()
distances = list()
print "fetch distances..."
for (idx, team_from) in enumerate(teams):
location_from = MapPoint.from_team(team_from)
for team_to in teams[idx + 1:]:
location_to = MapPoint.from_team(team_to)
dist = int(simple_distance(location_from, location_to) * 1000)
distances.append({"src": str(team_from.id), "dst": str(team_to.id), "value": dist, "text": str(dist)})
distances.append({"src": str(team_to.id), "dst": str(team_from.id), "value": dist, "text": str(dist)})
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distances, f)
|
|
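The nested loop above walks each unordered pair of teams once via the [idx + 1:] slice, which matches itertools.combinations; a quick equivalence check on plain integers:
from itertools import combinations
items = [1, 2, 3, 4]
pairs = [(a, b) for i, a in enumerate(items) for b in items[i + 1:]]
assert pairs == list(combinations(items, 2))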
43b4d63f8587bcc7078635a099f1acf48264303c
|
spacy/tests/serialize/test_serialize_tagger.py
|
spacy/tests/serialize/test_serialize_tagger.py
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralTagger as Tagger
import pytest
@pytest.fixture
def taggers(en_vocab):
tagger1 = Tagger(en_vocab, True)
tagger2 = Tagger(en_vocab, True)
tagger1.model = tagger1.Model(None, None)
tagger2.model = tagger2.Model(None, None)
return (tagger1, tagger2)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
tagger1, tagger2 = taggers
tagger1_b = tagger1.to_bytes()
tagger2_b = tagger2.to_bytes()
assert tagger1_b == tagger2_b
tagger1 = tagger1.from_bytes(tagger1_b)
assert tagger1.to_bytes() == tagger1_b
new_tagger1 = Tagger(en_vocab).from_bytes(tagger1_b)
assert new_tagger1.to_bytes() == tagger1_b
def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
tagger1, tagger2 = taggers
with make_tempdir() as d:
file_path1 = d / 'tagger1'
file_path2 = d / 'tagger2'
tagger1.to_disk(file_path1)
tagger2.to_disk(file_path2)
tagger1_d = Tagger(en_vocab).from_disk(file_path1)
tagger2_d = Tagger(en_vocab).from_disk(file_path2)
assert tagger1_d.to_bytes() == tagger2_d.to_bytes()
|
Add serialization tests for tagger
|
Add serialization tests for tagger
|
Python
|
mit
|
honnibal/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,recognai/spaCy,honnibal/spaCy,honnibal/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,recognai/spaCy,explosion/spaCy
|
Add serialization tests for tagger
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralTagger as Tagger
import pytest
@pytest.fixture
def taggers(en_vocab):
tagger1 = Tagger(en_vocab, True)
tagger2 = Tagger(en_vocab, True)
tagger1.model = tagger1.Model(None, None)
tagger2.model = tagger2.Model(None, None)
return (tagger1, tagger2)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
tagger1, tagger2 = taggers
tagger1_b = tagger1.to_bytes()
tagger2_b = tagger2.to_bytes()
assert tagger1_b == tagger2_b
tagger1 = tagger1.from_bytes(tagger1_b)
assert tagger1.to_bytes() == tagger1_b
new_tagger1 = Tagger(en_vocab).from_bytes(tagger1_b)
assert new_tagger1.to_bytes() == tagger1_b
def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
tagger1, tagger2 = taggers
with make_tempdir() as d:
file_path1 = d / 'tagger1'
file_path2 = d / 'tagger2'
tagger1.to_disk(file_path1)
tagger2.to_disk(file_path2)
tagger1_d = Tagger(en_vocab).from_disk(file_path1)
tagger2_d = Tagger(en_vocab).from_disk(file_path2)
assert tagger1_d.to_bytes() == tagger2_d.to_bytes()
|
<commit_before><commit_msg>Add serialization tests for tagger<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralTagger as Tagger
import pytest
@pytest.fixture
def taggers(en_vocab):
tagger1 = Tagger(en_vocab, True)
tagger2 = Tagger(en_vocab, True)
tagger1.model = tagger1.Model(None, None)
tagger2.model = tagger2.Model(None, None)
return (tagger1, tagger2)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
tagger1, tagger2 = taggers
tagger1_b = tagger1.to_bytes()
tagger2_b = tagger2.to_bytes()
assert tagger1_b == tagger2_b
tagger1 = tagger1.from_bytes(tagger1_b)
assert tagger1.to_bytes() == tagger1_b
new_tagger1 = Tagger(en_vocab).from_bytes(tagger1_b)
assert new_tagger1.to_bytes() == tagger1_b
def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
tagger1, tagger2 = taggers
with make_tempdir() as d:
file_path1 = d / 'tagger1'
file_path2 = d / 'tagger2'
tagger1.to_disk(file_path1)
tagger2.to_disk(file_path2)
tagger1_d = Tagger(en_vocab).from_disk(file_path1)
tagger2_d = Tagger(en_vocab).from_disk(file_path2)
assert tagger1_d.to_bytes() == tagger2_d.to_bytes()
|
Add serialization tests for tagger# coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralTagger as Tagger
import pytest
@pytest.fixture
def taggers(en_vocab):
tagger1 = Tagger(en_vocab, True)
tagger2 = Tagger(en_vocab, True)
tagger1.model = tagger1.Model(None, None)
tagger2.model = tagger2.Model(None, None)
return (tagger1, tagger2)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
tagger1, tagger2 = taggers
tagger1_b = tagger1.to_bytes()
tagger2_b = tagger2.to_bytes()
assert tagger1_b == tagger2_b
tagger1 = tagger1.from_bytes(tagger1_b)
assert tagger1.to_bytes() == tagger1_b
new_tagger1 = Tagger(en_vocab).from_bytes(tagger1_b)
assert new_tagger1.to_bytes() == tagger1_b
def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
tagger1, tagger2 = taggers
with make_tempdir() as d:
file_path1 = d / 'tagger1'
file_path2 = d / 'tagger2'
tagger1.to_disk(file_path1)
tagger2.to_disk(file_path2)
tagger1_d = Tagger(en_vocab).from_disk(file_path1)
tagger2_d = Tagger(en_vocab).from_disk(file_path2)
assert tagger1_d.to_bytes() == tagger2_d.to_bytes()
|
<commit_before><commit_msg>Add serialization tests for tagger<commit_after># coding: utf-8
from __future__ import unicode_literals
from ..util import make_tempdir
from ...pipeline import NeuralTagger as Tagger
import pytest
@pytest.fixture
def taggers(en_vocab):
tagger1 = Tagger(en_vocab, True)
tagger2 = Tagger(en_vocab, True)
tagger1.model = tagger1.Model(None, None)
tagger2.model = tagger2.Model(None, None)
return (tagger1, tagger2)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
tagger1, tagger2 = taggers
tagger1_b = tagger1.to_bytes()
tagger2_b = tagger2.to_bytes()
assert tagger1_b == tagger2_b
tagger1 = tagger1.from_bytes(tagger1_b)
assert tagger1.to_bytes() == tagger1_b
new_tagger1 = Tagger(en_vocab).from_bytes(tagger1_b)
assert new_tagger1.to_bytes() == tagger1_b
def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
tagger1, tagger2 = taggers
with make_tempdir() as d:
file_path1 = d / 'tagger1'
file_path2 = d / 'tagger2'
tagger1.to_disk(file_path1)
tagger2.to_disk(file_path2)
tagger1_d = Tagger(en_vocab).from_disk(file_path1)
tagger2_d = Tagger(en_vocab).from_disk(file_path2)
assert tagger1_d.to_bytes() == tagger2_d.to_bytes()
|
|
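make_tempdir in the disk roundtrip above is spaCy's own test helper; the stdlib analogue is tempfile.TemporaryDirectory. A sketch with a raw byte file standing in for the tagger, to show only the roundtrip shape (these are not the real to_disk/from_disk calls):
import pathlib
import tempfile
with tempfile.TemporaryDirectory() as d:
    path = pathlib.Path(d) / 'tagger1'
    path.write_bytes(b'\x00\x01')            # stand-in for tagger1.to_disk(path)
    assert path.read_bytes() == b'\x00\x01'  # stand-in for from_disk + to_bytes compare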
81728590b09270e4e32af61cdb5855bb814f683c
|
test/unit/sorting/test_quick_sort.py
|
test/unit/sorting/test_quick_sort.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.quick_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class QuickSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
Add unit test for quick sort implementation.
|
Add unit test for quick sort implementation.
|
Python
|
mit
|
weichen2046/algorithm-study,weichen2046/algorithm-study
|
Add unit test for quick sort implementation.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.quick_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class QuickSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for quick sort implementation.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.quick_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class QuickSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
Add unit test for quick sort implementation.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.quick_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for quick sort implementation.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from helper.read_data_file import read_int_array
from sorting.quick_sort import sort
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class InsertionSortTester(unittest.TestCase):
# Test sort in default order, i.e., in ascending order.
def test_sort_default(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array)
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in ascending order.
def test_sort_ascending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'asc')
expect = [65, 76, 86, 113, 140, 417, 444, 445, 567, 589, 637, 647, 702, 864, 969]
self.assertEqual(expect, array)
# Test sort in descending order.
def test_sort_descending(self):
array = read_int_array(os.path.join(BASE_DIR, 'data1.data'))
array = sort(array, 'desc')
expect = [969, 864, 702, 647, 637, 589, 567, 445, 444, 417, 140, 113, 86, 76, 65]
self.assertEqual(expect, array)
if __name__ == '__main__':
unittest.main()
|
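The sorting.quick_sort module exercised by this record is not included in the dump. Below is a minimal sketch consistent with the test's calls, assuming sort() accepts an optional 'asc'/'desc' order flag and returns the sorted list; the repository's real implementation may differ.

def sort(array, order='asc'):
    # Plain recursive quicksort returning a new list, which matches the
    # test's reassignment pattern (array = sort(array)).
    if len(array) <= 1:
        result = list(array)
    else:
        pivot = array[len(array) // 2]
        smaller = [x for x in array if x < pivot]
        equal = [x for x in array if x == pivot]
        larger = [x for x in array if x > pivot]
        result = sort(smaller) + equal + sort(larger)
    return result if order == 'asc' else result[::-1]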
|
5995fb29869e828ae7dd6bcca9eb30bfe00a959d
|
__init__.py
|
__init__.py
|
bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,7,4),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Mesh"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,74,0),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Object"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
Fix for Blender version check
|
Fix for Blender version check
|
Python
|
mit
|
mrachinskiy/blender-addon-booltron
|
bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,7,4),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Mesh"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
Fix for Blender version check
|
bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,74,0),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Object"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
<commit_before>bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,7,4),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Mesh"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
<commit_msg>Fix for Blender version check<commit_after>
|
bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,74,0),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Object"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,7,4),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Mesh"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
Fix for Blender version checkbl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,74,0),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Object"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
<commit_before>bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,7,4),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Mesh"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
<commit_msg>Fix for Blender version check<commit_after>bl_info = {
"name": "Booltron",
"author": "Mikhail Rachinskiy (jewelcourses.com)",
"version": (2000,),
"blender": (2,74,0),
"location": "3D View → Tool Shelf",
"description": "Booltron—super add-on for super fast booleans.",
"wiki_url": "https://github.com/mrachinskiy/blender-addon-booltron",
"tracker_url": "https://github.com/mrachinskiy/blender-addon-booltron/issues",
"category": "Object"}
if "bpy" in locals():
from importlib import reload
reload(utility)
reload(operators)
reload(ui)
del reload
else:
import bpy
from . import (operators, ui)
classes = (
ui.ToolShelf,
operators.UNION,
operators.DIFFERENCE,
operators.INTERSECT,
operators.SLICE,
operators.SUBTRACT,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
|
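The fix matters because Blender compares bl_info["blender"] against its runtime version tuple element by element, so (2, 7, 4) encodes "2.7.4" rather than "2.74". A plain-Python illustration (no Blender required; the runtime tuple below is an assumed example):

old_minimum = (2, 7, 4)    # reads as Blender "2.7.4"
new_minimum = (2, 74, 0)   # reads as Blender "2.74", the intended floor
blender_270 = (2, 70, 0)   # roughly what bpy.app.version reports in 2.70
assert old_minimum <= blender_270        # wrongly accepts Blender 2.70
assert not (new_minimum <= blender_270)  # correctly rejects it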
560228cbc2e95fd24f994e0a78465a031f6e0eef
|
crmapp/contacts/forms.py
|
crmapp/contacts/forms.py
|
from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name',
'role', 'phone', 'email', 'account',
)
widgets = {
'first_name': forms.TextInput(
attrs={'placeholder':'First Name', 'class':'form-control'}
),
'last_name': forms.TextInput(
attrs={'placeholder':'Last Name', 'class':'form-control'}
),
'role': forms.TextInput(
attrs={'placeholder':'Role', 'class':'form-control'}
),
'phone': forms.TextInput(
attrs={'placeholder':'Phone', 'class':'form-control'}
),
'email': forms.TextInput(
attrs={'placeholder':'Email', 'class':'form-control'}
),
}
|
Create the Contacts App - Part II > New Contact - Create Form
|
Create the Contacts App - Part II > New Contact - Create Form
|
Python
|
mit
|
deenaariff/Django,tabdon/crmeasyapp,tabdon/crmeasyapp
|
Create the Contacts App - Part II > New Contact - Create Form
|
from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name',
'role', 'phone', 'email', 'account',
)
widgets = {
'first_name': forms.TextInput(
attrs={'placeholder':'First Name', 'class':'form-control'}
),
'last_name': forms.TextInput(
attrs={'placeholder':'Last Name', 'class':'form-control'}
),
'role': forms.TextInput(
attrs={'placeholder':'Role', 'class':'form-control'}
),
'phone': forms.TextInput(
attrs={'placeholder':'Phone', 'class':'form-control'}
),
'email': forms.TextInput(
attrs={'placeholder':'Email', 'class':'form-control'}
),
}
|
<commit_before><commit_msg>Create the Contacts App - Part II > New Contact - Create Form<commit_after>
|
from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name',
'role', 'phone', 'email', 'account',
)
widgets = {
'first_name': forms.TextInput(
attrs={'placeholder':'First Name', 'class':'form-control'}
),
'last_name': forms.TextInput(
attrs={'placeholder':'Last Name', 'class':'form-control'}
),
'role': forms.TextInput(
attrs={'placeholder':'Role', 'class':'form-control'}
),
'phone': forms.TextInput(
attrs={'placeholder':'Phone', 'class':'form-control'}
),
'email': forms.TextInput(
attrs={'placeholder':'Email', 'class':'form-control'}
),
}
|
Create the Contacts App - Part II > New Contact - Create Formfrom django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name',
'role', 'phone', 'email', 'account',
)
widgets = {
'first_name': forms.TextInput(
attrs={'placeholder':'First Name', 'class':'form-control'}
),
'last_name': forms.TextInput(
attrs={'placeholder':'Last Name', 'class':'form-control'}
),
'role': forms.TextInput(
attrs={'placeholder':'Role', 'class':'form-control'}
),
'phone': forms.TextInput(
attrs={'placeholder':'Phone', 'class':'form-control'}
),
'email': forms.TextInput(
attrs={'placeholder':'Email', 'class':'form-control'}
),
}
|
<commit_before><commit_msg>Create the Contacts App - Part II > New Contact - Create Form<commit_after>from django import forms
from .models import Contact
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('first_name', 'last_name',
'role', 'phone', 'email', 'account',
)
widgets = {
'first_name': forms.TextInput(
attrs={'placeholder':'First Name', 'class':'form-control'}
),
'last_name': forms.TextInput(
attrs={'placeholder':'Last Name', 'class':'form-control'}
),
'role': forms.TextInput(
attrs={'placeholder':'Role', 'class':'form-control'}
),
'phone': forms.TextInput(
attrs={'placeholder':'Phone', 'class':'form-control'}
),
'email': forms.TextInput(
attrs={'placeholder':'Email', 'class':'form-control'}
),
}
|
|
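A hypothetical view wiring for the form above; the view name, URL name, and template path are illustrative assumptions, not part of this record:

from django.shortcuts import redirect, render
from .forms import ContactForm

def contact_new(request):
    # Bind POST data when present; an unbound form renders empty fields.
    form = ContactForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('contact_list')  # assumed URL name
    return render(request, 'contacts/contact_new.html', {'form': form})

Note the form renders 'email' with TextInput; forms.EmailInput would add browser-side validation, but the record keeps the tutorial's choice.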
b273248f1c33abfe355657e8b0e4e85492efb10d
|
designate/tests/test_api/test_v1/test_limits.py
|
designate/tests/test_api/test_v1/test_limits.py
|
# coding=utf-8
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1LimitsTest(ApiV1Test):
def test_get_limits_schema(self):
response = self.get('/schemas/limits')
self.assertIn('id', response.json)
self.assertIn('description', response.json)
self.assertIn('title', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
def test_get_limits(self):
response = self.get('/limits')
self.assertIn('limits', response.json)
self.assertIn('absolute', response.json['limits'])
self.assertIn('maxDomains', response.json['limits']['absolute'])
self.assertIn('maxDomainRecords', response.json['limits']['absolute'])
|
Add tests for limits api in V1 api
|
Add tests for limits api in V1 api
Change-Id: Id05520acf5e3a62647915829d72ca7afd3916337
|
Python
|
apache-2.0
|
cneill/designate-testing,muraliselva10/designate,ionrock/designate,grahamhayes/designate,ramsateesh/designate,tonyli71/designate,cneill/designate-testing,grahamhayes/designate,tonyli71/designate,ramsateesh/designate,openstack/designate,grahamhayes/designate,muraliselva10/designate,cneill/designate-testing,ionrock/designate,openstack/designate,muraliselva10/designate,tonyli71/designate,ionrock/designate,openstack/designate,ramsateesh/designate
|
Add tests for limits api in V1 api
Change-Id: Id05520acf5e3a62647915829d72ca7afd3916337
|
# coding=utf-8
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1LimitsTest(ApiV1Test):
def test_get_limits_schema(self):
response = self.get('/schemas/limits')
self.assertIn('id', response.json)
self.assertIn('description', response.json)
self.assertIn('title', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
def test_get_limits(self):
response = self.get('/limits')
self.assertIn('limits', response.json)
self.assertIn('absolute', response.json['limits'])
self.assertIn('maxDomains', response.json['limits']['absolute'])
self.assertIn('maxDomainRecords', response.json['limits']['absolute'])
|
<commit_before><commit_msg>Add tests for limits api in V1 api
Change-Id: Id05520acf5e3a62647915829d72ca7afd3916337<commit_after>
|
# coding=utf-8
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1LimitsTest(ApiV1Test):
def test_get_limits_schema(self):
response = self.get('/schemas/limits')
self.assertIn('id', response.json)
self.assertIn('description', response.json)
self.assertIn('title', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
def test_get_limits(self):
response = self.get('/limits')
self.assertIn('limits', response.json)
self.assertIn('absolute', response.json['limits'])
self.assertIn('maxDomains', response.json['limits']['absolute'])
self.assertIn('maxDomainRecords', response.json['limits']['absolute'])
|
Add tests for limits api in V1 api
Change-Id: Id05520acf5e3a62647915829d72ca7afd3916337# coding=utf-8
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1LimitsTest(ApiV1Test):
def test_get_limits_schema(self):
response = self.get('/schemas/limits')
self.assertIn('id', response.json)
self.assertIn('description', response.json)
self.assertIn('title', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
def test_get_limits(self):
response = self.get('/limits')
self.assertIn('limits', response.json)
self.assertIn('absolute', response.json['limits'])
self.assertIn('maxDomains', response.json['limits']['absolute'])
self.assertIn('maxDomainRecords', response.json['limits']['absolute'])
|
<commit_before><commit_msg>Add tests for limits api in V1 api
Change-Id: Id05520acf5e3a62647915829d72ca7afd3916337<commit_after># coding=utf-8
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests.test_api.test_v1 import ApiV1Test
LOG = logging.getLogger(__name__)
class ApiV1LimitsTest(ApiV1Test):
def test_get_limits_schema(self):
response = self.get('/schemas/limits')
self.assertIn('id', response.json)
self.assertIn('description', response.json)
self.assertIn('title', response.json)
self.assertIn('additionalProperties', response.json)
self.assertIn('properties', response.json)
def test_get_limits(self):
response = self.get('/limits')
self.assertIn('limits', response.json)
self.assertIn('absolute', response.json['limits'])
self.assertIn('maxDomains', response.json['limits']['absolute'])
self.assertIn('maxDomainRecords', response.json['limits']['absolute'])
|
|
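For reference, the payload shape implied by the assertions above; the numeric quotas are illustrative, not taken from this record:

example_limits_response = {
    'limits': {
        'absolute': {
            'maxDomains': 10,         # assumed sample quota
            'maxDomainRecords': 500,  # assumed sample quota
        },
    },
}
assert 'absolute' in example_limits_response['limits']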
52b62e2bfd5bef7ad1047259d0516539c20a2442
|
scifight_proj/management/commands/changepassword_quiet.py
|
scifight_proj/management/commands/changepassword_quiet.py
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = "Quietly change a user's password for django.contrib.auth."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('username', type=str,
help='Username to change password for.')
parser.add_argument('password', type=str,
help='Password (will not be validated).')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".')
def handle(self, *args, **options):
username = options['username']
password = options['password']
user_model_cls = get_user_model()
try:
u = user_model_cls._default_manager \
.using(options.get('database')) \
.get(**{user_model_cls.USERNAME_FIELD: username})
except user_model_cls.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
u.set_password(password)
u.save()
return "Password changed successfully for user '%s'" % u
|
Add 'manage.py' command for unattended password setup
|
Add 'manage.py' command for unattended password setup
Unfortunately, there is no standard way to non-interactively create
superuser account, neither in Django, nor in django-extensions. All the
following commands require manual password-typing:
- createsuperuser
- changepassword
- passwd
- set_fake_passwords (works with DEBUG=True only)
So it was really necessary to invent our own wheel in order to make
convenient unattended deploys (see: Docker, Heroku) possible.
Signed-off-by: Pavel Kretov <7cae407f1f3b56350827d1394be40a3e8f9cb303@gmail.com>
|
Python
|
agpl-3.0
|
turboj55/scifight,turboj55/scifight,scifight/scifight,scifight/scifight,turboj55/scifight,scifight/scifight
|
Add 'manage.py' command for unattended password setup
Unfortunately, there is no standard way to non-interactively create
superuser account, neither in Django, nor in django-extensions. All the
following commands require manual password-typing:
- createsuperuser
- changepassword
- passwd
- set_fake_passwords (works with DEBUG=True only)
So it was really necessary to invent our own wheel in order to make
convenient unattended deploys (see: Docker, Heroku) possible.
Signed-off-by: Pavel Kretov <7cae407f1f3b56350827d1394be40a3e8f9cb303@gmail.com>
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = "Quietly change a user's password for django.contrib.auth."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('username', type=str,
help='Username to change password for.')
parser.add_argument('password', type=str,
help='Password (will not be validated).')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".')
def handle(self, *args, **options):
username = options['username']
password = options['password']
user_model_cls = get_user_model()
try:
u = user_model_cls._default_manager \
.using(options.get('database')) \
.get(**{user_model_cls.USERNAME_FIELD: username})
except user_model_cls.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
u.set_password(password)
u.save()
return "Password changed successfully for user '%s'" % u
|
<commit_before><commit_msg>Add 'manage.py' command for unattended password setup
Unfortunately, there is no standard way to non-interactively create
superuser account, neither in Django, nor in django-extensions. All the
following commands require manual password-typing:
- createsuperuser
- changepassword
- passwd
- set_fake_passwords (works with DEBUG=True only)
So it was really necessary to invent our own wheel in order to make
convenient unattended deploys (see: Docker, Heroku) possible.
Signed-off-by: Pavel Kretov <7cae407f1f3b56350827d1394be40a3e8f9cb303@gmail.com><commit_after>
|
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = "Quietly change a user's password for django.contrib.auth."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('username', type=str,
help='Username to change password for.')
parser.add_argument('password', type=str,
help='Password (will not be validated).')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".')
def handle(self, *args, **options):
username = options['username']
password = options['password']
user_model_cls = get_user_model()
try:
u = user_model_cls._default_manager \
.using(options.get('database')) \
.get(**{user_model_cls.USERNAME_FIELD: username})
except user_model_cls.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
u.set_password(password)
u.save()
return "Password changed successfully for user '%s'" % u
|
Add 'manage.py' command for unattended password setup
Unfortunately, there is no standard way to non-interactively create
superuser account, neither in Django, nor in django-extensions. All the
following commands require manual password-typing:
- createsuperuser
- changepassword
- passwd
- set_fake_passwords (works with DEBUG=True only)
So it was really necessary to invent our own wheel in order to make
convenient unattended deploys (see: Docker, Heroku) possible.
Signed-off-by: Pavel Kretov <7cae407f1f3b56350827d1394be40a3e8f9cb303@gmail.com>from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = "Quietly change a user's password for django.contrib.auth."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('username', type=str,
help='Username to change password for.')
parser.add_argument('password', type=str,
help='Password (will not be validated).')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".')
def handle(self, *args, **options):
username = options['username']
password = options['password']
user_model_cls = get_user_model()
try:
u = user_model_cls._default_manager \
.using(options.get('database')) \
.get(**{user_model_cls.USERNAME_FIELD: username})
except user_model_cls.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
u.set_password(password)
u.save()
return "Password changed successfully for user '%s'" % u
|
<commit_before><commit_msg>Add 'manage.py' command for unattended password setup
Unfortunately, there is no standard way to non-interactively create
superuser account, neither in Django, nor in django-extensions. All the
following commands require manual password-typing:
- createsuperuser
- changepassword
- passwd
- set_fake_passwords (works with DEBUG=True only)
So it was really necessary to invent our own wheel in order to make
convenient unattended deploys (see: Docker, Heroku) possible.
Signed-off-by: Pavel Kretov <7cae407f1f3b56350827d1394be40a3e8f9cb303@gmail.com><commit_after>from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = "Quietly change a user's password for django.contrib.auth."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('username', type=str,
help='Username to change password for.')
parser.add_argument('password', type=str,
help='Password (will not be validated).')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".')
def handle(self, *args, **options):
username = options['username']
password = options['password']
user_model_cls = get_user_model()
try:
u = user_model_cls._default_manager \
.using(options.get('database')) \
.get(**{user_model_cls.USERNAME_FIELD: username})
except user_model_cls.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
u.set_password(password)
u.save()
return "Password changed successfully for user '%s'" % u
|
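A usage sketch for unattended deploys, driving the command through Django's standard call_command; the credentials are placeholders and a configured settings module is assumed:

from django.core.management import call_command

call_command('changepassword_quiet', 'admin', 's3cret')
# Shell equivalent: python manage.py changepassword_quiet admin s3cret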
|
7f4d21ad84dc12165ea65abd1606d4aa3689e3cb
|
headers/cpp/nonvirtual_dtors.py
|
headers/cpp/nonvirtual_dtors.py
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes which have a virtual method and non-virtual destructor."""
import sys
from cpp import ast
from cpp import metrics
from cpp import utils
def _FindWarnings(filename, source, ast_list):
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print '%s:%d' % (filename, lines.GetLineNumber(class_node.start)),
print class_node.name, 'has virtual methods without a virtual dtor'
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print 'Processing', filename
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
_FindWarnings(filename, source, entire_ast)
except:
# An error message was already printed since we couldn't parse.
pass
if __name__ == '__main__':
main(sys.argv)
|
Add script to find destructors which are not virtual, but should be
|
Add script to find destructors which are not virtual, but should be
git-svn-id: b0ea89ea3bf41df64b6a046736e217d0ae4a0fba@66 806ff5bb-693f-0410-b502-81bc3482ff28
|
Python
|
apache-2.0
|
myint/cppclean,myint/cppclean,myint/cppclean,myint/cppclean
|
Add script to find destructors which are not virtual, but should be
git-svn-id: b0ea89ea3bf41df64b6a046736e217d0ae4a0fba@66 806ff5bb-693f-0410-b502-81bc3482ff28
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes which have a virtual method and non-virtual destructor."""
import sys
from cpp import ast
from cpp import metrics
from cpp import utils
def _FindWarnings(filename, source, ast_list):
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print '%s:%d' % (filename, lines.GetLineNumber(class_node.start)),
print class_node.name, 'has virtual methods without a virtual dtor'
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print 'Processing', filename
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
_FindWarnings(filename, source, entire_ast)
except:
# An error message was already printed since we couldn't parse.
pass
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add script to find destructors which are not virtual, but should be
git-svn-id: b0ea89ea3bf41df64b6a046736e217d0ae4a0fba@66 806ff5bb-693f-0410-b502-81bc3482ff28<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes which have a virtual method and non-virtual destructor."""
import sys
from cpp import ast
from cpp import metrics
from cpp import utils
def _FindWarnings(filename, source, ast_list):
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print '%s:%d' % (filename, lines.GetLineNumber(class_node.start)),
print class_node.name, 'has virtual methods without a virtual dtor'
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print 'Processing', filename
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
_FindWarnings(filename, source, entire_ast)
except:
# An error message was already printed since we couldn't parse.
pass
if __name__ == '__main__':
main(sys.argv)
|
Add script to find destructors which are not virtual, but should be
git-svn-id: b0ea89ea3bf41df64b6a046736e217d0ae4a0fba@66 806ff5bb-693f-0410-b502-81bc3482ff28#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes which have a virtual method and non-virtual destructor."""
import sys
from cpp import ast
from cpp import metrics
from cpp import utils
def _FindWarnings(filename, source, ast_list):
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print '%s:%d' % (filename, lines.GetLineNumber(class_node.start)),
print class_node.name, 'has virtual methods without a virtual dtor'
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print 'Processing', filename
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
_FindWarnings(filename, source, entire_ast)
except:
# An error message was already printed since we couldn't parse.
pass
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add script to find destructors which are not virtual, but should be
git-svn-id: b0ea89ea3bf41df64b6a046736e217d0ae4a0fba@66 806ff5bb-693f-0410-b502-81bc3482ff28<commit_after>#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes which have a virtual method and non-virtual destructor."""
import sys
from cpp import ast
from cpp import metrics
from cpp import utils
def _FindWarnings(filename, source, ast_list):
for node in ast_list:
if isinstance(node, ast.Class) and node.body:
class_node = node
has_virtuals = False
for node in node.body:
if (isinstance(node, ast.Function) and
node.modifiers & ast.FUNCTION_VIRTUAL):
has_virtuals = True
if node.modifiers & ast.FUNCTION_DTOR:
break
else:
if has_virtuals and not class_node.bases:
lines = metrics.Metrics(source)
print '%s:%d' % (filename, lines.GetLineNumber(class_node.start)),
print class_node.name, 'has virtual methods without a virtual dtor'
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print 'Processing', filename
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
_FindWarnings(filename, source, entire_ast)
except:
# An error message was already printed since we couldn't parse.
pass
if __name__ == '__main__':
main(sys.argv)
|
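The checker leans on Python's for/else: the else suite runs only when the loop completes without hitting break. Since indentation is flattened in this dump, here is a standalone illustration of that control flow with purely illustrative names:

def warn_needed(members):
    saw_virtual = False
    for member in members:
        if member == 'virtual_dtor':
            break              # destructor found; the else suite is skipped
        if member == 'virtual':
            saw_virtual = True
    else:
        return saw_virtual     # loop never broke: virtuals, but no virtual dtor
    return False

assert warn_needed(['virtual', 'method'])
assert not warn_needed(['virtual', 'virtual_dtor'])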
|
50886a39d3cda7b487ed3862626d565c80737add
|
calaccess_processed/migrations/0011_auto_20171023_1620.py
|
calaccess_processed/migrations/0011_auto_20171023_1620.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-23 16:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0010_auto_20171013_1838'),
]
operations = [
migrations.AlterModelOptions(
name='fileridvalue',
options={'verbose_name': 'Filer ID value'},
),
migrations.AlterModelOptions(
name='filingidvalue',
options={'verbose_name': 'Filing ID value'},
),
migrations.AlterModelOptions(
name='processeddatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS processed data file'},
),
]
|
Add migrations for verbose name changes
|
Add migrations for verbose name changes
|
Python
|
mit
|
california-civic-data-coalition/django-calaccess-processed-data,california-civic-data-coalition/django-calaccess-processed-data
|
Add migrations for verbose name changes
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-23 16:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0010_auto_20171013_1838'),
]
operations = [
migrations.AlterModelOptions(
name='fileridvalue',
options={'verbose_name': 'Filer ID value'},
),
migrations.AlterModelOptions(
name='filingidvalue',
options={'verbose_name': 'Filing ID value'},
),
migrations.AlterModelOptions(
name='processeddatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS processed data file'},
),
]
|
<commit_before><commit_msg>Add migrations for verbose name changes<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-23 16:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0010_auto_20171013_1838'),
]
operations = [
migrations.AlterModelOptions(
name='fileridvalue',
options={'verbose_name': 'Filer ID value'},
),
migrations.AlterModelOptions(
name='filingidvalue',
options={'verbose_name': 'Filing ID value'},
),
migrations.AlterModelOptions(
name='processeddatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS processed data file'},
),
]
|
Add migrations for verbose name changes# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-23 16:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0010_auto_20171013_1838'),
]
operations = [
migrations.AlterModelOptions(
name='fileridvalue',
options={'verbose_name': 'Filer ID value'},
),
migrations.AlterModelOptions(
name='filingidvalue',
options={'verbose_name': 'Filing ID value'},
),
migrations.AlterModelOptions(
name='processeddatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS processed data file'},
),
]
|
<commit_before><commit_msg>Add migrations for verbose name changes<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-23 16:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_processed', '0010_auto_20171013_1838'),
]
operations = [
migrations.AlterModelOptions(
name='fileridvalue',
options={'verbose_name': 'Filer ID value'},
),
migrations.AlterModelOptions(
name='filingidvalue',
options={'verbose_name': 'Filing ID value'},
),
migrations.AlterModelOptions(
name='processeddatafile',
options={'ordering': ('-version_id', 'file_name'), 'verbose_name': 'TRACKING: CAL-ACCESS processed data file'},
),
]
|
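A sketch of the model-level Meta options that would generate this migration; field definitions are elided and the class bodies are reconstructions, not the project's actual models:

from django.db import models

class FilerIDValue(models.Model):          # fields omitted
    class Meta:
        verbose_name = 'Filer ID value'

class FilingIDValue(models.Model):         # fields omitted
    class Meta:
        verbose_name = 'Filing ID value'

class ProcessedDataFile(models.Model):     # fields omitted
    class Meta:
        ordering = ('-version_id', 'file_name')
        verbose_name = 'TRACKING: CAL-ACCESS processed data file'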
|
d388709d1c7d52cf1f2552bcfdbfd6b83b578675
|
tools/perf/benchmarks/robohornet_pro.py
|
tools/perf/benchmarks/robohornet_pro.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
# Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 120)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
# Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
|
Raise robohornetpro timeout. Is it timing out on cros.
|
[Telemetry] Raise robohornetpro timeout. Is it timing out on cros.
BUG=266129
Review URL: https://chromiumcodereview.appspot.com/21297004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@214925 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
ChromiumWebApps/chromium,ChromiumWebApps/chromium,anirudhSK/chromium,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,anirudhSK/chromium,patrickm/chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,jaruba/chromium.src,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,ltilve/chromium,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,Chilledheart/chromium,ltilve/chromium,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,markYoungH/chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,littlstar/chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,mogoweb/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,anirudhSK/chromium,chuan9/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,ltilve/chromium,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,littlstar/chromium.src,ChromiumWebApps/chromium,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,ltilve/chromium,dushu1203/chromium.src,fujunwei/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,Fireblend/chromium-crosswalk,Just-D/chromium-1,littlstar/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,littlstar/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,krieger-od/nwjs_chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,M4sse/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,jaruba/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ltilve/chromium,jaruba/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,Jonekee/chromium.src,dednal/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,littlstar/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,axinging/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,ltilve/chromium,ltilve/chromium,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,anirudhSK/chromium,mogoweb/chromium-crosswalk,M4sse/chromium.src,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,littlstar/chromium.src,patrickm/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Chilledheart/chromium,chuan9/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,anirudhSK/chromium,krieger-od/nwjs_chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,Just-D/chromium-1,M4sse/chromium.src,patrickm/chromium.src,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,Chilledheart/chromium,dushu1203/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,M4sse/chromium.src,Chilledheart/chromium
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
# Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
[Telemetry] Raise robohornetpro timeout. Is it timing out on cros.
BUG=266129
Review URL: https://chromiumcodereview.appspot.com/21297004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@214925 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 120)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
|
<commit_before># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
<commit_msg>[Telemetry] Raise robohornetpro timeout. Is it timing out on cros.
BUG=266129
Review URL: https://chromiumcodereview.appspot.com/21297004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@214925 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 120)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
[Telemetry] Raise robohornetpro timeout. Is it timing out on cros.
BUG=266129
Review URL: https://chromiumcodereview.appspot.com/21297004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@214925 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 120)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
|
<commit_before># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 60)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
<commit_msg>[Telemetry] Raise robohornetpro timeout. Is it timing out on cros.
BUG=266129
Review URL: https://chromiumcodereview.appspot.com/21297004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@214925 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Microsoft's RoboHornet Pro benchmark."""
import os
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class RobohornetProMeasurement(page_measurement.PageMeasurement):
def MeasurePage(self, _, tab, results):
tab.ExecuteJavaScript('ToggleRoboHornet()')
done = 'document.getElementById("results").innerHTML.indexOf("Total") != -1'
def _IsDone():
return tab.EvaluateJavaScript(done)
util.WaitFor(_IsDone, 120)
result = int(tab.EvaluateJavaScript('stopTime - startTime'))
results.Add('Total', 'ms', result)
class RobohornetPro(test.Test):
test = RobohornetProMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../data/robohornetpro.json',
      # Measurement requires use of real Date.now() for measurement.
'make_javascript_deterministic': False,
'pages': [
{ 'url':
'http://ie.microsoft.com/testdrive/performance/robohornetpro/' }
]
}, os.path.abspath(__file__))
|
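The diff in this record only widens a polling deadline from 60 to 120 seconds. For readers unfamiliar with Telemetry's util.WaitFor, below is a minimal standalone sketch of the same poll-until-done pattern; wait_for, poll_interval and the 3-second toy condition are illustrative names and values, not Telemetry's actual API.

import time

def wait_for(condition, timeout_seconds, poll_interval=0.5):
    # Poll `condition` until it returns a truthy value or the deadline passes.
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(poll_interval)
    raise RuntimeError('condition not met within %s seconds' % timeout_seconds)

# A benchmark that needs ~3 seconds finishes well inside the widened
# 120-second budget; on slow hardware a 60-second budget can expire first.
start = time.time()
wait_for(lambda: time.time() - start > 3, timeout_seconds=120)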
5c1fad9e6a75ee43d3a3b7bce6c9249cf601b4b9
|
tendrl/commons/objects/cluster_tendrl_context/__init__.py
|
tendrl/commons/objects/cluster_tendrl_context/__init__.py
|
import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.node_context.node_id
return super(_ClusterTendrlContextEtcd, self).render()
|
import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.tendrl_context.integration_id
return super(_ClusterTendrlContextEtcd, self).render()
|
Write cluster_tendrl_context to proper location
|
Write cluster_tendrl_context to proper location
Currently it is written to clusters/<node-id>/TendrlContext
This is fixed in this PR
tendrl-bug-id: Tendrl/commons#302
Signed-off-by: nnDarshan <d2c6d450ab98b078f2f1942c995e6d92dd504bc8@gmail.com>
|
Python
|
lgpl-2.1
|
r0h4n/commons,Tendrl/commons,rishubhjain/commons
|
import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.node_context.node_id
return super(_ClusterTendrlContextEtcd, self).render()
Write cluster_tendrl_context to proper location
Currently it is written to clusters/<node-id>/TendrlContext
This is fixed in this PR
tendrl-bug-id: Tendrl/commons#302
Signed-off-by: nnDarshan <d2c6d450ab98b078f2f1942c995e6d92dd504bc8@gmail.com>
|
import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.tendrl_context.integration_id
return super(_ClusterTendrlContextEtcd, self).render()
|
<commit_before>import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.node_context.node_id
return super(_ClusterTendrlContextEtcd, self).render()
<commit_msg>Write cluster_tendrl_context to proper location
Currently it is written to clusters/<node-id>/TendrlContext
This is fixed in this PR
tendrl-bug-id: Tendrl/commons#302
Signed-off-by: nnDarshan <d2c6d450ab98b078f2f1942c995e6d92dd504bc8@gmail.com><commit_after>
|
import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.tendrl_context.integration_id
return super(_ClusterTendrlContextEtcd, self).render()
|
import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.node_context.node_id
return super(_ClusterTendrlContextEtcd, self).render()
Write cluster_tendrl_context to proper location
Currently it is written to clusters/<node-id>/TendrlContext
This is fixed in this PR
tendrl-bug-id: Tendrl/commons#302
Signed-off-by: nnDarshan <d2c6d450ab98b078f2f1942c995e6d92dd504bc8@gmail.com>import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.tendrl_context.integration_id
return super(_ClusterTendrlContextEtcd, self).render()
|
<commit_before>import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.node_context.node_id
return super(_ClusterTendrlContextEtcd, self).render()
<commit_msg>Write cluster_tendrl_context to proper location
Currently it is written to clusters/<node-id>/TendrlContext
This is fixed in this PR
tendrl-bug-id: Tendrl/commons#302
Signed-off-by: nnDarshan <d2c6d450ab98b078f2f1942c995e6d92dd504bc8@gmail.com><commit_after>import json
import logging
import os
import socket
import uuid
from tendrl.commons.etcdobj import EtcdObj
from tendrl.commons.utils import cmd_utils
from tendrl.commons import objects
LOG = logging.getLogger(__name__)
class ClusterTendrlContext(objects.BaseObject):
def __init__(
self,
integration_id=None,
cluster_id=None,
cluster_name=None,
sds_name=None,
sds_version=None,
*args, **kwargs):
super(ClusterTendrlContext, self).__init__(*args, **kwargs)
self.value = 'clusters/%s/TendrlContext'
# integration_id is the Tendrl generated cluster UUID
self.integration_id = integration_id
self.cluster_id=cluster_id
self.cluster_name=cluster_name
self.sds_name=sds_name
self.sds_version=sds_version
self._etcd_cls = _ClusterTendrlContextEtcd
class _ClusterTendrlContextEtcd(EtcdObj):
"""A table of the cluster tendrl context, lazily updated
"""
__name__ = 'clusters/%s/TendrlContext'
_tendrl_cls = ClusterTendrlContext
def render(self):
self.__name__ = self.__name__ % NS.tendrl_context.integration_id
return super(_ClusterTendrlContextEtcd, self).render()
|
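The entire fix in this record is the template substitution in render(): the etcd key must be built from the cluster-wide integration_id rather than the per-host node_id. A toy illustration with made-up identifiers (real deployments take both values from the NS namespace):

TEMPLATE = 'clusters/%s/TendrlContext'

node_id = 'node-7f3a'         # hypothetical per-host id (NS.node_context.node_id)
integration_id = 'cl-19bd'    # hypothetical cluster UUID (NS.tendrl_context.integration_id)

# Before the fix: each node writes the shared context under its own key, so
# one logical object is scattered across as many keys as there are nodes.
print(TEMPLATE % node_id)          # clusters/node-7f3a/TendrlContext
# After the fix: every node of the cluster renders the same key.
print(TEMPLATE % integration_id)   # clusters/cl-19bd/TendrlContext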
9a3695316f469bb70161d50665697ab248b0d7f1
|
vistrails/tests/resources/console_mode_test.py
|
vistrails/tests/resources/console_mode_test.py
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at vistrails@sci.utah.edu.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""Testing package for console_mode"""
##############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from core.modules.basic_modules import Float
class TestTupleExecution(Module):
def compute(self):
v1, v2 = self.getInputFromPort('input')
self.setResult('output', v1 + v2)
##############################################################################
def initialize():
reg = core.modules.module_registry
reg.addModule(TestTupleExecution)
reg.addInputPort(TestTupleExecution, 'input', [Float, Float])
reg.addOutputPort(TestTupleExecution, 'output', (Float, 'output'))
|
Package for console_mode test suite.
|
Package for console_mode test suite.
|
Python
|
bsd-3-clause
|
Nikea/VisTrails,Nikea/VisTrails,minesense/VisTrails,VisTrails/VisTrails,minesense/VisTrails,hjanime/VisTrails,minesense/VisTrails,Nikea/VisTrails,celiafish/VisTrails,VisTrails/VisTrails,celiafish/VisTrails,hjanime/VisTrails,hjanime/VisTrails,minesense/VisTrails,celiafish/VisTrails,hjanime/VisTrails,VisTrails/VisTrails,hjanime/VisTrails,VisTrails/VisTrails,minesense/VisTrails,VisTrails/VisTrails,celiafish/VisTrails,Nikea/VisTrails
|
Package for console_mode test suite.
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at vistrails@sci.utah.edu.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""Testing package for console_mode"""
##############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from core.modules.basic_modules import Float
class TestTupleExecution(Module):
def compute(self):
v1, v2 = self.getInputFromPort('input')
self.setResult('output', v1 + v2)
##############################################################################
def initialize():
reg = core.modules.module_registry
reg.addModule(TestTupleExecution)
reg.addInputPort(TestTupleExecution, 'input', [Float, Float])
reg.addOutputPort(TestTupleExecution, 'output', (Float, 'output'))
|
<commit_before><commit_msg>Package for console_mode test suite.<commit_after>
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at vistrails@sci.utah.edu.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""Testing package for console_mode"""
##############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from core.modules.basic_modules import Float
class TestTupleExecution(Module):
def compute(self):
v1, v2 = self.getInputFromPort('input')
self.setResult('output', v1 + v2)
##############################################################################
def initialize():
reg = core.modules.module_registry
reg.addModule(TestTupleExecution)
reg.addInputPort(TestTupleExecution, 'input', [Float, Float])
reg.addOutputPort(TestTupleExecution, 'output', (Float, 'output'))
|
Package for console_mode test suite.############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at vistrails@sci.utah.edu.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""Testing package for console_mode"""
##############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from core.modules.basic_modules import Float
class TestTupleExecution(Module):
def compute(self):
v1, v2 = self.getInputFromPort('input')
self.setResult('output', v1 + v2)
##############################################################################
def initialize():
reg = core.modules.module_registry
reg.addModule(TestTupleExecution)
reg.addInputPort(TestTupleExecution, 'input', [Float, Float])
reg.addOutputPort(TestTupleExecution, 'output', (Float, 'output'))
|
<commit_before><commit_msg>Package for console_mode test suite.<commit_after>############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at vistrails@sci.utah.edu.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
"""Testing package for console_mode"""
##############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from core.modules.basic_modules import Float
class TestTupleExecution(Module):
def compute(self):
v1, v2 = self.getInputFromPort('input')
self.setResult('output', v1 + v2)
##############################################################################
def initialize():
reg = core.modules.module_registry
reg.addModule(TestTupleExecution)
reg.addInputPort(TestTupleExecution, 'input', [Float, Float])
reg.addOutputPort(TestTupleExecution, 'output', (Float, 'output'))
|
|
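The registration in this record declares a single 'input' port carrying a [Float, Float] tuple, which compute() unpacks and sums. A framework-free sketch of that behaviour, using a stub in place of VisTrails' Module base class; StubModule is invented for illustration:

class StubModule(object):
    # Minimal stand-in for core.modules.vistrails_module.Module.
    def __init__(self, **ports):
        self._ports = ports
    def getInputFromPort(self, name):
        return self._ports[name]
    def setResult(self, name, value):
        print('%s = %r' % (name, value))

class TestTupleExecution(StubModule):
    def compute(self):
        v1, v2 = self.getInputFromPort('input')  # one port delivers a 2-tuple
        self.setResult('output', v1 + v2)

TestTupleExecution(input=(1.5, 2.5)).compute()  # prints: output = 4.0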
8efab0283613dd09b09b1771fe35732c9cbc5cea
|
common/config.py
|
common/config.py
|
import imp
module = imp.new_module('config')
module.__file__ = 'config.py'
config = {}
config_file = open(module.__file__)
exec(compile(config_file.read(), 'config.py', 'exec'), module.__dict__)
for key in dir(module):
if key.isupper():
config[key] = getattr(module, key)
|
Add missing file for last commit
|
Add missing file for last commit
|
Python
|
isc
|
tobbez/lys-reader
|
Add missing file for last commit
|
import imp
module = imp.new_module('config')
module.__file__ = 'config.py'
config = {}
config_file = open(module.__file__)
exec(compile(config_file.read(), 'config.py', 'exec'), module.__dict__)
for key in dir(module):
if key.isupper():
config[key] = getattr(module, key)
|
<commit_before><commit_msg>Add missing file for last commit<commit_after>
|
import imp
module = imp.new_module('config')
module.__file__ = 'config.py'
config = {}
config_file = open(module.__file__)
exec(compile(config_file.read(), 'config.py', 'exec'), module.__dict__)
for key in dir(module):
if key.isupper():
config[key] = getattr(module, key)
|
Add missing file for last commitimport imp
module = imp.new_module('config')
module.__file__ = 'config.py'
config = {}
config_file = open(module.__file__)
exec(compile(config_file.read(), 'config.py', 'exec'), module.__dict__)
for key in dir(module):
if key.isupper():
config[key] = getattr(module, key)
|
<commit_before><commit_msg>Add missing file for last commit<commit_after>import imp
module = imp.new_module('config')
module.__file__ = 'config.py'
config = {}
config_file = open(module.__file__)
exec(compile(config_file.read(), 'config.py', 'exec'), module.__dict__)
for key in dir(module):
if key.isupper():
config[key] = getattr(module, key)
|
|
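The snippet in this record exec()s config.py into a synthetic module and keeps only UPPERCASE attributes, the usual settings convention. Since the imp module it uses is deprecated on modern Pythons, here is an equivalent sketch with importlib; the 'config.py' path in the working directory is the same assumption the original makes.

import importlib.util

spec = importlib.util.spec_from_file_location('config', 'config.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)   # runs config.py inside the new module

# Keep only UPPERCASE names, mirroring the original filtering loop.
config = {key: getattr(module, key) for key in dir(module) if key.isupper()}
print(config)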
ab9fed3a9a9152ed266d40eab2f16b12c77349fd
|
tests/test_vcs_bazaar.py
|
tests/test_vcs_bazaar.py
|
from tests.test_pip import pyversion
from pip.vcs.bazaar import Bazaar
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
https_bzr_repo = Bazaar(url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ssh_bzr_repo = Bazaar(url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ftp_bzr_repo = Bazaar(url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
sftp_bzr_repo = Bazaar(url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
launchpad_bzr_repo = Bazaar(url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject')
assert http_bzr_repo.get_url_rev() == ('http://bzr.myproject.org/MyProject/trunk/', None)
assert https_bzr_repo.get_url_rev() == ('https://bzr.myproject.org/MyProject/trunk/', None)
assert ssh_bzr_repo.get_url_rev() == ('bzr+ssh://bzr.myproject.org/MyProject/trunk/', None)
assert ftp_bzr_repo.get_url_rev() == ('ftp://bzr.myproject.org/MyProject/trunk/', None)
assert sftp_bzr_repo.get_url_rev() == ('sftp://bzr.myproject.org/MyProject/trunk/', None)
assert launchpad_bzr_repo.get_url_rev() == ('lp:MyLaunchpadProject', None)
|
Add unit tests for bazaar URL handling
|
Add unit tests for bazaar URL handling
Tested on 2.4.4, 2.7.1 and 3.2
nosetests tests.test_vcs_bazaar passes in each, full setup.py test gives:
2.4:
----------------------------------------------------------------------
Ran 107 tests in 167.345s
OK
2.7 fails 1 on Test freezing a mercurial clone
Script result: python setup.py develop
-- stderr: --------------------
warning: no files found matching '*.html' under directory 'docs'
warning: no previously-included files matching '*.txt' found under directory 'docs/build'
no previously-included directories found matching 'docs/build/html/_sources'
3.2
I got a bunch of failures, mostly environments (bzr, hg) and fragility I think.
----------------------------------------------------------------------
Ran 107 tests in 289.871s
FAILED (failures=5, errors=6, skipped=2)
|
Python
|
mit
|
pypa/pip,willingc/pip,jythontools/pip,blarghmatey/pip,patricklaw/pip,domenkozar/pip,rouge8/pip,squidsoup/pip,zorosteven/pip,benesch/pip,pjdelport/pip,zvezdan/pip,dstufft/pip,supriyantomaftuh/pip,sbidoul/pip,caosmo/pip,natefoo/pip,h4ck3rm1k3/pip,techtonik/pip,minrk/pip,jasonkying/pip,sigmavirus24/pip,RonnyPfannschmidt/pip,erikrose/pip,habnabit/pip,mindw/pip,nthall/pip,zenlambda/pip,mujiansu/pip,pjdelport/pip,dstufft/pip,ianw/pip,atdaemon/pip,ChristopherHogan/pip,alquerci/pip,qbdsoft/pip,cjerdonek/pip,prasaianooz/pip,Carreau/pip,davidovich/pip,techtonik/pip,blarghmatey/pip,jamezpolley/pip,jamezpolley/pip,esc/pip,prasaianooz/pip,davidovich/pip,habnabit/pip,graingert/pip,chaoallsome/pip,pfmoore/pip,jmagnusson/pip,zenlambda/pip,mindw/pip,esc/pip,fiber-space/pip,mattrobenolt/pip,zvezdan/pip,James-Firth/pip,squidsoup/pip,haridsv/pip,ncoghlan/pip,fiber-space/pip,Gabriel439/pip,harrisonfeng/pip,rbtcollins/pip,pradyunsg/pip,James-Firth/pip,tdsmith/pip,pypa/pip,jasonkying/pip,blarghmatey/pip,jmagnusson/pip,KarelJakubec/pip,benesch/pip,zvezdan/pip,willingc/pip,jythontools/pip,h4ck3rm1k3/pip,Gabriel439/pip,rbtcollins/pip,cjerdonek/pip,erikrose/pip,mujiansu/pip,supriyantomaftuh/pip,graingert/pip,dstufft/pip,Ivoz/pip,ncoghlan/pip,ncoghlan/pip,Ivoz/pip,jasonkying/pip,yati-sagade/pip,zorosteven/pip,qwcode/pip,prasaianooz/pip,sigmavirus24/pip,habnabit/pip,qwcode/pip,ChristopherHogan/pip,Gabriel439/pip,willingc/pip,h4ck3rm1k3/pip,luzfcb/pip,mujiansu/pip,zorosteven/pip,squidsoup/pip,sigmavirus24/pip,xavfernandez/pip,qbdsoft/pip,alex/pip,graingert/pip,haridsv/pip,mindw/pip,jythontools/pip,jamezpolley/pip,techtonik/pip,fiber-space/pip,RonnyPfannschmidt/pip,alquerci/pip,benesch/pip,yati-sagade/pip,ChristopherHogan/pip,pradyunsg/pip,davidovich/pip,xavfernandez/pip,ianw/pip,tdsmith/pip,alex/pip,caosmo/pip,wkeyword/pip,rbtcollins/pip,msabramo/pip,atdaemon/pip,supriyantomaftuh/pip,esc/pip,msabramo/pip,chaoallsome/pip,harrisonfeng/pip,qbdsoft/pip,alex/pip,rouge8/pip,Carreau/pip,harrisonfeng/pip,luzfcb/pip,wkeyword/pip,RonnyPfannschmidt/pip,caosmo/pip,minrk/pip,wkeyword/pip,atdaemon/pip,patricklaw/pip,radiosilence/pip,KarelJakubec/pip,KarelJakubec/pip,natefoo/pip,pfmoore/pip,sbidoul/pip,pjdelport/pip,nthall/pip,natefoo/pip,nthall/pip,haridsv/pip,xavfernandez/pip,chaoallsome/pip,erikrose/pip,rouge8/pip,zenlambda/pip,jmagnusson/pip,James-Firth/pip,yati-sagade/pip,luzfcb/pip,mattrobenolt/pip,tdsmith/pip
|
Add unit tests for bazaar URL handling
Tested on 2.4.4, 2.7.1 and 3.2
nosetests tests.test_vcs_bazaar passes in each, full setup.py test gives:
2.4:
----------------------------------------------------------------------
Ran 107 tests in 167.345s
OK
2.7 fails 1 on Test freezing a mercurial clone
Script result: python setup.py develop
-- stderr: --------------------
warning: no files found matching '*.html' under directory 'docs'
warning: no previously-included files matching '*.txt' found under directory 'docs/build'
no previously-included directories found matching 'docs/build/html/_sources'
3.2
I got a bunch of failures, mostly environments (bzr, hg) and fragility I think.
----------------------------------------------------------------------
Ran 107 tests in 289.871s
FAILED (failures=5, errors=6, skipped=2)
|
from tests.test_pip import pyversion
from pip.vcs.bazaar import Bazaar
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
https_bzr_repo = Bazaar(url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ssh_bzr_repo = Bazaar(url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ftp_bzr_repo = Bazaar(url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
sftp_bzr_repo = Bazaar(url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
launchpad_bzr_repo = Bazaar(url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject')
assert http_bzr_repo.get_url_rev() == ('http://bzr.myproject.org/MyProject/trunk/', None)
assert https_bzr_repo.get_url_rev() == ('https://bzr.myproject.org/MyProject/trunk/', None)
assert ssh_bzr_repo.get_url_rev() == ('bzr+ssh://bzr.myproject.org/MyProject/trunk/', None)
assert ftp_bzr_repo.get_url_rev() == ('ftp://bzr.myproject.org/MyProject/trunk/', None)
assert sftp_bzr_repo.get_url_rev() == ('sftp://bzr.myproject.org/MyProject/trunk/', None)
assert launchpad_bzr_repo.get_url_rev() == ('lp:MyLaunchpadProject', None)
|
<commit_before><commit_msg>Add unit tests for bazaar URL handling
Tested on 2.4.4, 2.7.1 and 3.2
nosetests tests.test_vcs_bazaar passes in each, full setup.py test gives:
2.4:
----------------------------------------------------------------------
Ran 107 tests in 167.345s
OK
2.7 fails 1 on Test freezing a mercurial clone
Script result: python setup.py develop
-- stderr: --------------------
warning: no files found matching '*.html' under directory 'docs'
warning: no previously-included files matching '*.txt' found under directory 'docs/build'
no previously-included directories found matching 'docs/build/html/_sources'
3.2
I got a bunch of failures, mostly environments (bzr, hg) and fragility I think.
----------------------------------------------------------------------
Ran 107 tests in 289.871s
FAILED (failures=5, errors=6, skipped=2)<commit_after>
|
from tests.test_pip import pyversion
from pip.vcs.bazaar import Bazaar
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
https_bzr_repo = Bazaar(url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ssh_bzr_repo = Bazaar(url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ftp_bzr_repo = Bazaar(url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
sftp_bzr_repo = Bazaar(url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
launchpad_bzr_repo = Bazaar(url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject')
assert http_bzr_repo.get_url_rev() == ('http://bzr.myproject.org/MyProject/trunk/', None)
assert https_bzr_repo.get_url_rev() == ('https://bzr.myproject.org/MyProject/trunk/', None)
assert ssh_bzr_repo.get_url_rev() == ('bzr+ssh://bzr.myproject.org/MyProject/trunk/', None)
assert ftp_bzr_repo.get_url_rev() == ('ftp://bzr.myproject.org/MyProject/trunk/', None)
assert sftp_bzr_repo.get_url_rev() == ('sftp://bzr.myproject.org/MyProject/trunk/', None)
assert launchpad_bzr_repo.get_url_rev() == ('lp:MyLaunchpadProject', None)
|
Add unit tests for bazaar URL handling
Tested on 2.4.4, 2.7.1 and 3.2
nosetests tests.test_vcs_bazaar passes in each, full setup.py test gives:
2.4:
----------------------------------------------------------------------
Ran 107 tests in 167.345s
OK
2.7 fails 1 on Test freezing a mercurial clone
Script result: python setup.py develop
-- stderr: --------------------
warning: no files found matching '*.html' under directory 'docs'
warning: no previously-included files matching '*.txt' found under directory 'docs/build'
no previously-included directories found matching 'docs/build/html/_sources'
3.2
I got a bunch of failures, mostly environments (bzr, hg) and fragility I think.
----------------------------------------------------------------------
Ran 107 tests in 289.871s
FAILED (failures=5, errors=6, skipped=2)from tests.test_pip import pyversion
from pip.vcs.bazaar import Bazaar
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
https_bzr_repo = Bazaar(url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ssh_bzr_repo = Bazaar(url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ftp_bzr_repo = Bazaar(url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
sftp_bzr_repo = Bazaar(url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
launchpad_bzr_repo = Bazaar(url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject')
assert http_bzr_repo.get_url_rev() == ('http://bzr.myproject.org/MyProject/trunk/', None)
assert https_bzr_repo.get_url_rev() == ('https://bzr.myproject.org/MyProject/trunk/', None)
assert ssh_bzr_repo.get_url_rev() == ('bzr+ssh://bzr.myproject.org/MyProject/trunk/', None)
assert ftp_bzr_repo.get_url_rev() == ('ftp://bzr.myproject.org/MyProject/trunk/', None)
assert sftp_bzr_repo.get_url_rev() == ('sftp://bzr.myproject.org/MyProject/trunk/', None)
assert launchpad_bzr_repo.get_url_rev() == ('lp:MyLaunchpadProject', None)
|
<commit_before><commit_msg>Add unit tests for bazaar URL handling
Tested on 2.4.4, 2.7.1 and 3.2
nosetests tests.test_vcs_bazaar passes in each, full setup.py test gives:
2.4:
----------------------------------------------------------------------
Ran 107 tests in 167.345s
OK
2.7 fails 1 on Test freezing a mercurial clone
Script result: python setup.py develop
-- stderr: --------------------
warning: no files found matching '*.html' under directory 'docs'
warning: no previously-included files matching '*.txt' found under directory 'docs/build'
no previously-included directories found matching 'docs/build/html/_sources'
3.2
I got a bunch of failures, mostly environments (bzr, hg) and fragility I think.
----------------------------------------------------------------------
Ran 107 tests in 289.871s
FAILED (failures=5, errors=6, skipped=2)<commit_after>from tests.test_pip import pyversion
from pip.vcs.bazaar import Bazaar
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
https_bzr_repo = Bazaar(url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ssh_bzr_repo = Bazaar(url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
ftp_bzr_repo = Bazaar(url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
sftp_bzr_repo = Bazaar(url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject')
launchpad_bzr_repo = Bazaar(url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject')
assert http_bzr_repo.get_url_rev() == ('http://bzr.myproject.org/MyProject/trunk/', None)
assert https_bzr_repo.get_url_rev() == ('https://bzr.myproject.org/MyProject/trunk/', None)
assert ssh_bzr_repo.get_url_rev() == ('bzr+ssh://bzr.myproject.org/MyProject/trunk/', None)
assert ftp_bzr_repo.get_url_rev() == ('ftp://bzr.myproject.org/MyProject/trunk/', None)
assert sftp_bzr_repo.get_url_rev() == ('sftp://bzr.myproject.org/MyProject/trunk/', None)
assert launchpad_bzr_repo.get_url_rev() == ('lp:MyLaunchpadProject', None)
|
|
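The assertions in this record pin down one rule: the 'bzr+' marker is stripped before the URL reaches bzr, except for ssh URLs (bzr itself expects bzr+ssh://) and Launchpad's lp: shorthand. A self-contained sketch of that rule — an illustration of the behaviour under test, not pip's actual get_url_rev implementation:

def normalize_bzr_url(url):
    url = url.split('#', 1)[0]             # drop any '#egg=' fragment
    if url.startswith('bzr+'):
        stripped = url[len('bzr+'):]
        if stripped.startswith('ssh://'):  # bzr wants the bzr+ssh:// form kept
            return url
        return stripped                    # http, https, ftp, sftp, lp
    return url

assert normalize_bzr_url('bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject') == 'http://bzr.myproject.org/MyProject/trunk/'
assert normalize_bzr_url('bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject') == 'bzr+ssh://bzr.myproject.org/MyProject/trunk/'
assert normalize_bzr_url('bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject') == 'lp:MyLaunchpadProject'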
a1a552857498206b6684681eb457978dd5adf710
|
nap/rest/mapper.py
|
nap/rest/mapper.py
|
'''
Mixins for using Mappers with Publisher
'''
from django.core.exceptions import ValidationError
from nap import http
from nap.utils import flatten_errors
class MapperListMixin(object):
def list_get_default(self, request, action, object_id):
'''
Replace the default list handler with one that returns a flat list.
'''
object_list = self.get_object_list()
object_list = self.filter_object_list(object_list)
object_list = self.sort_object_list(object_list)
mapper = self.mapper()
data = [
mapper << obj
for obj in object_list
]
return self.create_response(data)
class MapperDetailMixin(object):
def object_get_default(self, request, action, object_id):
obj = self.get_object(object_id)
mapper = self.mapper(obj)
return self.create_response(mapper._reduce())
class MapperPostMixin(object):
'''
Generic handling of POST-to-create
'''
def list_post_default(self, request, action, object_id):
data = self.get_request_data({})
mapper = self.mapper(self.model())
try:
obj = mapper._apply(data, full=True)
except ValidationError as e:
return self.post_invalid(e.error_dict)
else:
return self.post_valid(obj)
def post_valid(self, obj):
obj.save()
return http.Created()
def post_invalid(self, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class PutMixin(object):
'''
Generic handling of PUT-to-update
'''
def object_put_default(self, request, action, object_id):
data = self.get_request_data({})
obj = self.get_object(object_id)
mapper = self.mapper(obj)
try:
mapper._apply(data)
except ValidationError as e:
return self.put_invalid(obj, e.error_dict)
self.put_valid(obj, data)
return http.Accepted()
def put_valid(self, obj, data):
'''
Hook to control updating of objects.
Will be passed the unsaved updated model instance.
Default: save the object.
'''
obj.save()
return obj
def put_invalid(self, obj, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class DeleteMixin(object):
'''
Generic handling of DELETE-to-disable
'''
def object_delete_default(self, request, action, object_id):
obj = self.get_object(object_id)
self.delete_object(obj)
return http.ResetContent()
def delete_object(self, obj):
'''
Hook to allow control of how to delete an instance.
'''
obj.delete()
|
Add Publisher mixins for using Mappers
|
Add Publisher mixins for using Mappers
|
Python
|
bsd-3-clause
|
limbera/django-nap,MarkusH/django-nap
|
Add Publisher mixins for using Mappers
|
'''
Mixins for using Mappers with Publisher
'''
from django.core.exceptions import ValidationError
from nap import http
from nap.utils import flatten_errors
class MapperListMixin(object):
def list_get_default(self, request, action, object_id):
'''
Replace the default list handler with one that returns a flat list.
'''
object_list = self.get_object_list()
object_list = self.filter_object_list(object_list)
object_list = self.sort_object_list(object_list)
mapper = self.mapper()
data = [
mapper << obj
for obj in object_list
]
return self.create_response(data)
class MapperDetailMixin(object):
def object_get_default(self, request, action, object_id):
obj = self.get_object(object_id)
mapper = self.mapper(obj)
return self.create_response(mapper._reduce())
class MapperPostMixin(object):
'''
Generic handling of POST-to-create
'''
def list_post_default(self, request, action, object_id):
data = self.get_request_data({})
mapper = self.mapper(self.model())
try:
obj = mapper._apply(data, full=True)
except ValidationError as e:
return self.post_invalid(e.error_dict)
else:
return self.post_valid(obj)
def post_valid(self, obj):
obj.save()
return http.Created()
def post_invalid(self, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class PutMixin(object):
'''
Generic handling of PUT-to-update
'''
def object_put_default(self, request, action, object_id):
data = self.get_request_data({})
obj = self.get_object(object_id)
mapper = self.mapper(obj)
try:
mapper._apply(data)
except ValidationError as e:
return self.put_invalid(obj, e.error_dict)
self.put_valid(obj, data)
return http.Accepted()
def put_valid(self, obj, data):
'''
Hook to control updating of objects.
Will be passed the unsaved updated model instance.
Default: save the object.
'''
obj.save()
return obj
def put_invalid(self, obj, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class DeleteMixin(object):
'''
Generic handling of DELETE-to-disable
'''
def object_delete_default(self, request, action, object_id):
obj = self.get_object(object_id)
self.delete_object(obj)
return http.ResetContent()
def delete_object(self, obj):
'''
Hook to allow control of how to delete an instance.
'''
obj.delete()
|
<commit_before><commit_msg>Add Publisher mixins for using Mappers<commit_after>
|
'''
Mixins for using Mappers with Publisher
'''
from django.core.exceptions import ValidationError
from nap import http
from nap.utils import flatten_errors
class MapperListMixin(object):
def list_get_default(self, request, action, object_id):
'''
Replace the default list handler with one that returns a flat list.
'''
object_list = self.get_object_list()
object_list = self.filter_object_list(object_list)
object_list = self.sort_object_list(object_list)
mapper = self.mapper()
data = [
mapper << obj
for obj in object_list
]
return self.create_response(data)
class MapperDetailMixin(object):
def object_get_default(self, request, action, object_id):
obj = self.get_object(object_id)
mapper = self.mapper(obj)
return self.create_response(mapper._reduce())
class MapperPostMixin(object):
'''
Generic handling of POST-to-create
'''
def list_post_default(self, request, action, object_id):
data = self.get_request_data({})
mapper = self.mapper(self.model())
try:
obj = mapper._apply(data, full=True)
except ValidationError as e:
return self.post_invalid(e.error_dict)
else:
return self.post_valid(obj)
def post_valid(self, obj):
obj.save()
return http.Created()
def post_invalid(self, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class PutMixin(object):
'''
Generic handling of PUT-to-update
'''
def object_put_default(self, request, action, object_id):
data = self.get_request_data({})
obj = self.get_object(object_id)
mapper = self.mapper(obj)
try:
mapper._apply(data)
except ValidationError as e:
return self.put_invalid(obj, e.error_dict)
self.put_valid(obj, data)
return http.Accepted()
def put_valid(self, obj, data):
'''
Hook to control updating of objects.
Will be passed the unsaved updated model instance.
Default: save the object.
'''
obj.save()
return obj
def put_invalid(self, obj, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class DeleteMixin(object):
'''
Generic handling of DELETE-to-disable
'''
def object_delete_default(self, request, action, object_id):
obj = self.get_object(object_id)
self.delete_object(obj)
return http.ResetContent()
def delete_object(self, obj):
'''
Hook to allow control of how to delete an instance.
'''
obj.delete()
|
Add Publisher mixins for using Mappers'''
Mixins for using Mappers with Publisher
'''
from django.core.exceptions import ValidationError
from nap import http
from nap.utils import flatten_errors
class MapperListMixin(object):
def list_get_default(self, request, action, object_id):
'''
Replace the default list handler with one that returns a flat list.
'''
object_list = self.get_object_list()
object_list = self.filter_object_list(object_list)
object_list = self.sort_object_list(object_list)
mapper = self.mapper()
data = [
mapper << obj
for obj in object_list
]
return self.create_response(data)
class MapperDetailMixin(object):
def object_get_default(self, request, action, object_id):
obj = self.get_object(object_id)
mapper = self.mapper(obj)
return self.create_response(mapper._reduce())
class MapperPostMixin(object):
'''
Generic handling of POST-to-create
'''
def list_post_default(self, request, action, object_id):
data = self.get_request_data({})
mapper = self.mapper(self.model())
try:
obj = mapper._apply(data, full=True)
except ValidationError as e:
return self.post_invalid(e.error_dict)
else:
return self.post_valid(obj)
def post_valid(self, obj):
obj.save()
return http.Created()
def post_invalid(self, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class PutMixin(object):
'''
Generic handling of PUT-to-update
'''
def object_put_default(self, request, action, object_id):
data = self.get_request_data({})
obj = self.get_object(object_id)
mapper = self.mapper(obj)
try:
mapper._apply(data)
except ValidationError as e:
return self.put_invalid(obj, e.error_dict)
self.put_valid(obj, data)
return http.Accepted()
def put_valid(self, obj, data):
'''
Hook to control updating of objects.
Will be passed the unsaved updated model instance.
Default: save the object.
'''
obj.save()
return obj
def put_invalid(self, obj, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class DeleteMixin(object):
'''
Generic handling of DELETE-to-disable
'''
def object_delete_default(self, request, action, object_id):
obj = self.get_object(object_id)
self.delete_object(obj)
return http.ResetContent()
def delete_object(self, obj):
'''
Hook to allow control of how to delete an instance.
'''
obj.delete()
|
<commit_before><commit_msg>Add Publisher mixins for using Mappers<commit_after>'''
Mixins for using Mappers with Publisher
'''
from django.core.exceptions import ValidationError
from nap import http
from nap.utils import flatten_errors
class MapperListMixin(object):
def list_get_default(self, request, action, object_id):
'''
Replace the default list handler with one that returns a flat list.
'''
object_list = self.get_object_list()
object_list = self.filter_object_list(object_list)
object_list = self.sort_object_list(object_list)
mapper = self.mapper()
data = [
mapper << obj
for obj in object_list
]
return self.create_response(data)
class MapperDetailMixin(object):
def object_get_default(self, request, action, object_id):
obj = self.get_object(object_id)
mapper = self.mapper(obj)
return self.create_response(mapper._reduce())
class MapperPostMixin(object):
'''
Generic handling of POST-to-create
'''
def list_post_default(self, request, action, object_id):
data = self.get_request_data({})
mapper = self.mapper(self.model())
try:
obj = mapper._apply(data, full=True)
except ValidationError as e:
return self.post_invalid(e.error_dict)
else:
return self.post_valid(obj)
def post_valid(self, obj):
obj.save()
return http.Created()
def post_invalid(self, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class PutMixin(object):
'''
Generic handling of PUT-to-update
'''
def object_put_default(self, request, action, object_id):
data = self.get_request_data({})
obj = self.get_object(object_id)
mapper = self.mapper(obj)
try:
mapper._apply(data)
except ValidationError as e:
return self.put_invalid(obj, e.error_dict)
self.put_valid(obj, data)
return http.Accepted()
def put_valid(self, obj, data):
'''
Hook to control updating of objects.
Will be passed the unsaved updated model instance.
Default: save the object.
'''
obj.save()
return obj
def put_invalid(self, obj, errors):
return self.create_response(flatten_errors(errors),
response_class=http.BadRequest)
class DeleteMixin(object):
'''
Generic handling of DELETE-to-disable
'''
def object_delete_default(self, request, action, object_id):
obj = self.get_object(object_id)
self.delete_object(obj)
return http.ResetContent()
def delete_object(self, obj):
'''
Hook to allow control of how to delete an instance.
'''
obj.delete()
|
|
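Every mixin in this record follows one template: parse the request, apply the data through the mapper, then branch into overridable *_valid/*_invalid hooks. A framework-free sketch of that template; Response, handle_update and apply_age are invented stand-ins for illustration, not django-nap API.

class Response(object):
    def __init__(self, status, body=None):
        self.status, self.body = status, body

def handle_update(obj, data, apply_func):
    # Apply `data` to `obj`; 202 on success, 400 with errors on failure.
    try:
        apply_func(obj, data)
    except ValueError as e:          # stand-in for Django's ValidationError
        return Response(400, {'errors': str(e)})
    return Response(202)

def apply_age(obj, data):
    if data.get('age', 0) < 0:
        raise ValueError('age must be non-negative')
    obj.update(data)

record = {'age': 30}
print(handle_update(record, {'age': -1}, apply_age).status)  # 400
print(handle_update(record, {'age': 31}, apply_age).status)  # 202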
ff50744e2dd1f04959e4f7f63ff3a08d21ef9839
|
pint-numpy-type.py
|
pint-numpy-type.py
|
# A variable imbued with pint unit mutates its state when touched by Numpy.
# Source https://github.com/hgrecco/pint/blob/master/pint/quantity.py#L1165-L1167
# Since https://github.com/hgrecco/pint/commit/53d5fca35948a5bb80cb900e8e692e8206b1512a
import pint # 0.7.2
import numpy as np # 1.11.1
units = pint.UnitRegistry()
x = 1 * units.s
print(type(x._magnitude), x._magnitude)
np.array(x) # <-- This affects x. Note ignored return value. Expected dead code.
print(type(x._magnitude), x._magnitude)
# Worse, it breaks round().
print(round(x))  # TypeError: type numpy.ndarray doesn't define __round__ method
|
Add pint numpy type snippet
|
Add pint numpy type snippet
|
Python
|
mit
|
cmey/surprising-snippets,cmey/surprising-snippets
|
Add pint numpy type snippet
|
# A variable imbued with pint unit mutates its state when touched by Numpy.
# Source https://github.com/hgrecco/pint/blob/master/pint/quantity.py#L1165-L1167
# Since https://github.com/hgrecco/pint/commit/53d5fca35948a5bb80cb900e8e692e8206b1512a
import pint # 0.7.2
import numpy as np # 1.11.1
units = pint.UnitRegistry()
x = 1 * units.s
print(type(x._magnitude), x._magnitude)
np.array(x) # <-- This affects x. Note ignored return value. Expected dead code.
print(type(x._magnitude), x._magnitude)
# Worse, it breaks round().
print(round(x))  # TypeError: type numpy.ndarray doesn't define __round__ method
|
<commit_before><commit_msg>Add pint numpy type snippet<commit_after>
|
# A variable imbued with pint unit mutates its state when touched by Numpy.
# Source https://github.com/hgrecco/pint/blob/master/pint/quantity.py#L1165-L1167
# Since https://github.com/hgrecco/pint/commit/53d5fca35948a5bb80cb900e8e692e8206b1512a
import pint # 0.7.2
import numpy as np # 1.11.1
units = pint.UnitRegistry()
x = 1 * units.s
print(type(x._magnitude), x._magnitude)
np.array(x) # <-- This affects x. Note ignored return value. Expected dead code.
print(type(x._magnitude), x._magnitude)
# Worse, it breaks round().
print(round(x))  # TypeError: type numpy.ndarray doesn't define __round__ method
|
Add pint numpy type snippet# A variable imbued with pint unit mutates its state when touched by Numpy.
# Source https://github.com/hgrecco/pint/blob/master/pint/quantity.py#L1165-L1167
# Since https://github.com/hgrecco/pint/commit/53d5fca35948a5bb80cb900e8e692e8206b1512a
import pint # 0.7.2
import numpy as np # 1.11.1
units = pint.UnitRegistry()
x = 1 * units.s
print(type(x._magnitude), x._magnitude)
np.array(x) # <-- This affects x. Note ignored return value. Expected dead code.
print(type(x._magnitude), x._magnitude)
# Worse, it breaks round().
print(round(x))  # TypeError: type numpy.ndarray doesn't define __round__ method
|
<commit_before><commit_msg>Add pint numpy type snippet<commit_after># A variable imbued with pint unit mutates its state when touched by Numpy.
# Source https://github.com/hgrecco/pint/blob/master/pint/quantity.py#L1165-L1167
# Since https://github.com/hgrecco/pint/commit/53d5fca35948a5bb80cb900e8e692e8206b1512a
import pint # 0.7.2
import numpy as np # 1.11.1
units = pint.UnitRegistry()
x = 1 * units.s
print(type(x._magnitude), x._magnitude)
np.array(x) # <-- This affects x. Note ignored return value. Expected dead code.
print(type(x._magnitude), x._magnitude)
# Worse, it breaks round().
print(round(x))  # TypeError: type numpy.ndarray doesn't define __round__ method
|
|
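A defensive pattern, sketched here as an assumption rather than taken from the snippet: hand numpy the bare magnitude so the Quantity is never coerced in place.

import numpy as np  # 1.11.1, as in the snippet
import pint         # 0.7.2

units = pint.UnitRegistry()
x = 1 * units.s
arr = np.array(x.magnitude)            # copy the plain int out; x is untouched
print(type(x.magnitude), x.magnitude)  # still an int, not an ndarray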
bf8c008742dd921865f18f10b1c925c485406343
|
django_lightweight_queue/management/commands/queue_configuration.py
|
django_lightweight_queue/management/commands/queue_configuration.py
|
from django.core.management.base import NoArgsCommand
from ... import app_settings
class Command(NoArgsCommand):
def handle_noargs(self, **options):
print "django-lightweight-queue"
print "========================"
print
print "{0:<15} {1:>5}".format("Queue name", "Concurrency")
print "-" * 27
for k, v in app_settings.WORKERS.iteritems():
print " {0:<14} {1}".format(k, v)
print
print "Middleware:"
for x in app_settings.MIDDLEWARE:
print " * %s" % x
print
print "Backend: %s" % app_settings.BACKEND
|
Add a quick way to show the current queue configuration.
|
Add a quick way to show the current queue configuration.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>
|
Python
|
bsd-3-clause
|
prophile/django-lightweight-queue,prophile/django-lightweight-queue,thread/django-lightweight-queue,lamby/django-lightweight-queue,thread/django-lightweight-queue
|
Add a quick way to show the current queue configuration.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>
|
from django.core.management.base import NoArgsCommand
from ... import app_settings
class Command(NoArgsCommand):
def handle_noargs(self, **options):
print "django-lightweight-queue"
print "========================"
print
print "{0:<15} {1:>5}".format("Queue name", "Concurrency")
print "-" * 27
for k, v in app_settings.WORKERS.iteritems():
print " {0:<14} {1}".format(k, v)
print
print "Middleware:"
for x in app_settings.MIDDLEWARE:
print " * %s" % x
print
print "Backend: %s" % app_settings.BACKEND
|
<commit_before><commit_msg>Add a quick way to show the current queue configuration.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com><commit_after>
|
from django.core.management.base import NoArgsCommand
from ... import app_settings
class Command(NoArgsCommand):
def handle_noargs(self, **options):
print "django-lightweight-queue"
print "========================"
print
print "{0:<15} {1:>5}".format("Queue name", "Concurrency")
print "-" * 27
for k, v in app_settings.WORKERS.iteritems():
print " {0:<14} {1}".format(k, v)
print
print "Middleware:"
for x in app_settings.MIDDLEWARE:
print " * %s" % x
print
print "Backend: %s" % app_settings.BACKEND
|
Add a quick way to show the current queue configuration.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>from django.core.management.base import NoArgsCommand
from ... import app_settings
class Command(NoArgsCommand):
def handle_noargs(self, **options):
print "django-lightweight-queue"
print "========================"
print
print "{0:<15} {1:>5}".format("Queue name", "Concurrency")
print "-" * 27
for k, v in app_settings.WORKERS.iteritems():
print " {0:<14} {1}".format(k, v)
print
print "Middleware:"
for x in app_settings.MIDDLEWARE:
print " * %s" % x
print
print "Backend: %s" % app_settings.BACKEND
|
<commit_before><commit_msg>Add a quick way to show the current queue configuration.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com><commit_after>from django.core.management.base import NoArgsCommand
from ... import app_settings
class Command(NoArgsCommand):
def handle_noargs(self, **options):
print "django-lightweight-queue"
print "========================"
print
print "{0:<15} {1:>5}".format("Queue name", "Concurrency")
print "-" * 27
for k, v in app_settings.WORKERS.iteritems():
print " {0:<14} {1}".format(k, v)
print
print "Middleware:"
for x in app_settings.MIDDLEWARE:
print " * %s" % x
print
print "Backend: %s" % app_settings.BACKEND
|
|
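NoArgsCommand was removed in Django 1.10; a rough sketch of the same report on a modern BaseCommand (untested against this codebase) would be:

from django.core.management.base import BaseCommand

from ... import app_settings  # same relative import as the original module

class Command(BaseCommand):
    def handle(self, *args, **options):
        self.stdout.write("django-lightweight-queue")
        self.stdout.write("=" * 24)
        for name, concurrency in app_settings.WORKERS.items():
            self.stdout.write("  {0:<14} {1}".format(name, concurrency))
        self.stdout.write("Backend: %s" % app_settings.BACKEND)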
f54f1cea9a839e97b27103638530fb030ca81a6a
|
app/utils/scripts/update-defconfig.py
|
app/utils/scripts/update-defconfig.py
|
#!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Convert the defconfig_id fields into the new build_id one."""
import multiprocessing
import models
import utils
import utils.db
database = utils.db.get_db_connection({})
def add_build_type_field(collection_name):
collection = database[collection_name]
for doc in collection.find():
ret_val = utils.db.update2(
collection,
{models.ID_KEY: doc[models.ID_KEY]},
{"$set": {models.BUILD_TYPE_KEY: models.KERNEL_BUILD_TYPE}}
)
if ret_val == 500:
utils.LOG.error(
"Error updating build document %s", str(doc[models.ID_KEY]))
def convert_defconfig_id(collection_name):
utils.LOG.info("Converting collection %s", collection_name)
collection = database[collection_name]
update_doc = {
"$set": None,
"$unset": {"defconfig_id": ""}
}
for doc in collection.find():
update_doc["$set"] = {
models.BUILD_ID_KEY: doc.get("defconfig_id", None)
}
ret_val = utils.db.update2(
collection, {models.ID_KEY: doc[models.ID_KEY]}, update_doc)
if ret_val == 500:
utils.LOG.error(
"Error updating document %s", str(doc[models.ID_KEY]))
if __name__ == "__main__":
process_pool = multiprocessing.Pool(4)
process_pool.map(
convert_defconfig_id,
[
models.BOOT_COLLECTION,
models.TEST_SUITE_COLLECTION,
models.ERROR_LOGS_COLLECTION
]
)
process_pool.apply(add_build_type_field, (models.BUILD_COLLECTION,))
process_pool.close()
process_pool.join()
|
Add utility script to convert/update builds.
|
Add utility script to convert/update builds.
* Moving to mass-rename everything into build, the script is
used to convert defconfig_id fields into build_id.
* Add "kernel" type to build documents.
|
Python
|
lgpl-2.1
|
kernelci/kernelci-backend,kernelci/kernelci-backend
|
Add utility script to convert/update builds.
* Moving to mass-rename everything into build, the script is
used to convert defconfig_id fields into build_id.
* Add "kernel" type to build documents.
|
#!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Convert the defconfig_id fields into the new build_id one."""
import multiprocessing
import models
import utils
import utils.db
database = utils.db.get_db_connection({})
def add_build_type_field(collection_name):
collection = database[collection_name]
for doc in collection.find():
ret_val = utils.db.update2(
collection,
{models.ID_KEY: doc[models.ID_KEY]},
{"$set": {models.BUILD_TYPE_KEY: models.KERNEL_BUILD_TYPE}}
)
if ret_val == 500:
utils.LOG.error(
"Error updating build document %s", str(doc[models.ID_KEY]))
def convert_defconfig_id(collection_name):
utils.LOG.info("Converting collection %s", collection_name)
collection = database[collection_name]
update_doc = {
"$set": None,
"$unset": {"defconfig_id": ""}
}
for doc in collection.find():
update_doc["$set"] = {
models.BUILD_ID_KEY: doc.get("defconfig_id", None)
}
ret_val = utils.db.update2(
collection, {models.ID_KEY: doc[models.ID_KEY]}, update_doc)
if ret_val == 500:
utils.LOG.error(
"Error updating document %s", str(doc[models.ID_KEY]))
if __name__ == "__main__":
process_pool = multiprocessing.Pool(4)
process_pool.map(
convert_defconfig_id,
[
models.BOOT_COLLECTION,
models.TEST_SUITE_COLLECTION,
models.ERROR_LOGS_COLLECTION
]
)
process_pool.apply(add_build_type_field, (models.BUILD_COLLECTION,))
process_pool.close()
process_pool.join()
|
<commit_before><commit_msg>Add utility script to convert/update builds.
* Moving to mass-rename everything into build, the script is
used to convert defconfig_id fields into build_id.
* Add "kernel" type to build documents.<commit_after>
|
#!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Convert the defconfig_id fields into the new build_id one."""
import multiprocessing
import models
import utils
import utils.db
database = utils.db.get_db_connection({})
def add_build_type_field(collection_name):
collection = database[collection_name]
for doc in collection.find():
ret_val = utils.db.update2(
collection,
{models.ID_KEY: doc[models.ID_KEY]},
{"$set": {models.BUILD_TYPE_KEY: models.KERNEL_BUILD_TYPE}}
)
if ret_val == 500:
utils.LOG.error(
"Error updating build document %s", str(doc[models.ID_KEY]))
def convert_defconfig_id(collection_name):
utils.LOG.info("Converting collection %s", collection_name)
collection = database[collection_name]
update_doc = {
"$set": None,
"$unset": {"defconfig_id": ""}
}
for doc in collection.find():
update_doc["$set"] = {
models.BUILD_ID_KEY: doc.get("defconfig_id", None)
}
ret_val = utils.db.update2(
collection, {models.ID_KEY: doc[models.ID_KEY]}, update_doc)
if ret_val == 500:
utils.LOG.error(
"Error updating document %s", str(doc[models.ID_KEY]))
if __name__ == "__main__":
process_pool = multiprocessing.Pool(4)
process_pool.map(
convert_defconfig_id,
[
models.BOOT_COLLECTION,
models.TEST_SUITE_COLLECTION,
models.ERROR_LOGS_COLLECTION
]
)
process_pool.apply(add_build_type_field, (models.BUILD_COLLECTION,))
process_pool.close()
process_pool.join()
|
Add utility script to convert/update builds.
* Moving to mass-rename everything into build, the script is
used to convert defconfig_id fields into build_id.
* Add "kernel" type to build documents.#!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Convert the defconfig_id fields into the new build_id one."""
import multiprocessing
import models
import utils
import utils.db
database = utils.db.get_db_connection({})
def add_build_type_field(collection_name):
collection = database[collection_name]
for doc in collection.find():
ret_val = utils.db.update2(
collection,
{models.ID_KEY: doc[models.ID_KEY]},
{"$set": {models.BUILD_TYPE_KEY: models.KERNEL_BUILD_TYPE}}
)
if ret_val == 500:
utils.LOG.error(
"Error updating build document %s", str(doc[models.ID_KEY]))
def convert_defconfig_id(collection_name):
utils.LOG.info("Converting collection %s", collection_name)
collection = database[collection_name]
update_doc = {
"$set": None,
"$unset": {"defconfig_id": ""}
}
for doc in collection.find():
update_doc["$set"] = {
models.BUILD_ID_KEY: doc.get("defconfig_id", None)
}
ret_val = utils.db.update2(
collection, {models.ID_KEY: doc[models.ID_KEY]}, update_doc)
if ret_val == 500:
utils.LOG.error(
"Error updating document %s", str(doc[models.ID_KEY]))
if __name__ == "__main__":
process_pool = multiprocessing.Pool(4)
process_pool.map(
convert_defconfig_id,
[
models.BOOT_COLLECTION,
models.TEST_SUITE_COLLECTION,
models.ERROR_LOGS_COLLECTION
]
)
process_pool.apply(add_build_type_field, (models.BUILD_COLLECTION,))
process_pool.close()
process_pool.join()
|
<commit_before><commit_msg>Add utility script to convert/update builds.
* Moving to mass-rename everything into build, the script is
used to convert defconfig_id fields into build_id.
* Add "kernel" type to build documents.<commit_after>#!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Convert the defconfig_id fields into the new build_id one."""
import multiprocessing
import models
import utils
import utils.db
database = utils.db.get_db_connection({})
def add_build_type_field(collection_name):
collection = database[collection_name]
for doc in collection.find():
ret_val = utils.db.update2(
collection,
{models.ID_KEY: doc[models.ID_KEY]},
{"$set": {models.BUILD_TYPE_KEY: models.KERNEL_BUILD_TYPE}}
)
if ret_val == 500:
utils.LOG.error(
"Error updating build document %s", str(doc[models.ID_KEY]))
def convert_defconfig_id(collection_name):
utils.LOG.info("Converting collection %s", collection_name)
collection = database[collection_name]
update_doc = {
"$set": None,
"$unset": {"defconfig_id": ""}
}
for doc in collection.find():
update_doc["$set"] = {
models.BUILD_ID_KEY: doc.get("defconfig_id", None)
}
ret_val = utils.db.update2(
collection, {models.ID_KEY: doc[models.ID_KEY]}, update_doc)
if ret_val == 500:
utils.LOG.error(
"Error updating document %s", str(doc[models.ID_KEY]))
if __name__ == "__main__":
process_pool = multiprocessing.Pool(4)
process_pool.map(
convert_defconfig_id,
[
models.BOOT_COLLECTION,
models.TEST_SUITE_COLLECTION,
models.ERROR_LOGS_COLLECTION
]
)
process_pool.apply(add_build_type_field, (models.BUILD_COLLECTION,))
process_pool.close()
process_pool.join()
|
|
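MongoDB can also perform the rename server-side, one call per collection, instead of looping over documents in Python. A hedged pymongo sketch (bypassing the project's utils.db wrapper; database and collection names are guesses):

from pymongo import MongoClient

db = MongoClient()["kernel-ci"]  # hypothetical connection/database
for name in ("boot", "test_suite", "error_logs"):  # hypothetical collection names
    db[name].update_many(
        {"defconfig_id": {"$exists": True}},
        {"$rename": {"defconfig_id": "build_id"}},  # atomic server-side rename
    )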
6708fd75eb7272701e8e333e4940e47d5b6a05af
|
plugin_tests/web_client_test.py
|
plugin_tests/web_client_test.py
|
from tests import web_client_test
setUpModule = web_client_test.setUpModule
tearDownModule = web_client_test.tearDownModule
class WebClientTestCase(web_client_test.WebClientTestCase):
def setUp(self):
super(WebClientTestCase, self).setUp()
self.model('user').createUser(
login='minerva-admin',
password='minerva-password!',
email='minerva@email.com',
firstName='Min',
lastName='Erva',
admin=True
)
|
Add a custom client side test runner
|
Add a custom client side test runner
|
Python
|
apache-2.0
|
Kitware/minerva,Kitware/minerva,Kitware/minerva
|
Add a custom client side test runner
|
from tests import web_client_test
setUpModule = web_client_test.setUpModule
tearDownModule = web_client_test.tearDownModule
class WebClientTestCase(web_client_test.WebClientTestCase):
def setUp(self):
super(WebClientTestCase, self).setUp()
self.model('user').createUser(
login='minerva-admin',
password='minerva-password!',
email='minerva@email.com',
firstName='Min',
lastName='Erva',
admin=True
)
|
<commit_before><commit_msg>Add a custom client side test runner<commit_after>
|
from tests import web_client_test
setUpModule = web_client_test.setUpModule
tearDownModule = web_client_test.tearDownModule
class WebClientTestCase(web_client_test.WebClientTestCase):
def setUp(self):
super(WebClientTestCase, self).setUp()
self.model('user').createUser(
login='minerva-admin',
password='minerva-password!',
email='minerva@email.com',
firstName='Min',
lastName='Erva',
admin=True
)
|
Add a custom client side test runnerfrom tests import web_client_test
setUpModule = web_client_test.setUpModule
tearDownModule = web_client_test.tearDownModule
class WebClientTestCase(web_client_test.WebClientTestCase):
def setUp(self):
super(WebClientTestCase, self).setUp()
self.model('user').createUser(
login='minerva-admin',
password='minerva-password!',
email='minerva@email.com',
firstName='Min',
lastName='Erva',
admin=True
)
|
<commit_before><commit_msg>Add a custom client side test runner<commit_after>from tests import web_client_test
setUpModule = web_client_test.setUpModule
tearDownModule = web_client_test.tearDownModule
class WebClientTestCase(web_client_test.WebClientTestCase):
def setUp(self):
super(WebClientTestCase, self).setUp()
self.model('user').createUser(
login='minerva-admin',
password='minerva-password!',
email='minerva@email.com',
firstName='Min',
lastName='Erva',
admin=True
)
|
|
e77957398f78f905c6b8ac881b621c67c1352d0a
|
py/target-sum.py
|
py/target-sum.py
|
from collections import Counter
class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
c = Counter()
c[0] = 1
for n in nums:
nc = Counter()
for k, v in c.iteritems():
nc[k + n] += v
nc[k - n] += v
c = nc
return c[S]
|
Add py solution for 494. Target Sum
|
Add py solution for 494. Target Sum
494. Target Sum: https://leetcode.com/problems/target-sum/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 494. Target Sum
494. Target Sum: https://leetcode.com/problems/target-sum/
|
from collections import Counter
class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
c = Counter()
c[0] = 1
for n in nums:
nc = Counter()
for k, v in c.iteritems():
nc[k + n] += v
nc[k - n] += v
c = nc
return c[S]
|
<commit_before><commit_msg>Add py solution for 494. Target Sum
494. Target Sum: https://leetcode.com/problems/target-sum/<commit_after>
|
from collections import Counter
class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
c = Counter()
c[0] = 1
for n in nums:
nc = Counter()
for k, v in c.iteritems():
nc[k + n] += v
nc[k - n] += v
c = nc
return c[S]
|
Add py solution for 494. Target Sum
494. Target Sum: https://leetcode.com/problems/target-sum/from collections import Counter
class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
c = Counter()
c[0] = 1
for n in nums:
nc = Counter()
for k, v in c.iteritems():
nc[k + n] += v
nc[k - n] += v
c = nc
return c[S]
|
<commit_before><commit_msg>Add py solution for 494. Target Sum
494. Target Sum: https://leetcode.com/problems/target-sum/<commit_after>from collections import Counter
class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
c = Counter()
c[0] = 1
for n in nums:
nc = Counter()
for k, v in c.iteritems():
nc[k + n] += v
nc[k - n] += v
c = nc
return c[S]
|
|
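To see why the Counter sweep counts sign assignments, trace nums = [1, 1] with S = 0: at each step every reachable sum splits into sum+n and sum-n.

from collections import Counter

c = Counter({0: 1})              # one way to reach 0 using no elements
for n in [1, 1]:
    nc = Counter()
    for total, ways in c.items():
        nc[total + n] += ways    # assign +n
        nc[total - n] += ways    # assign -n
    c = nc
print(c)     # Counter({0: 2, 2: 1, -2: 1})
print(c[0])  # 2 ways: +1-1 and -1+1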
cbf29ea21179dcf3cf9c5f0a7eacd664f025f5db
|
tools/scripts/remove_package.py
|
tools/scripts/remove_package.py
|
import fnmatch
import os
import re
re_reference = re.compile(r'\<Reference Include=\"([\w\.]+)\,.*?\<\/Reference\>', re.DOTALL | re.MULTILINE)
def process_csproj(file, delete_package):
proj = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
proj_new = re_reference.sub(repl, proj)
if proj != proj_new:
print "!", file
open(file, "wb").write(proj_new)
re_package = re.compile(r'\<package id\="([\w\.]*)".*?\/\>')
def process_package(file, delete_package):
pack = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
pack_new = re_package.sub(repl, pack)
if pack != pack_new:
print "!", file
open(file, "wb").write(pack_new)
def do_recursive(start_path, delete_package):
projs = []
packages = []
    for root, dirnames, filenames in os.walk(start_path):
for filename in fnmatch.filter(filenames, '*.csproj'):
process_csproj(os.path.join(root, filename), delete_package)
for filename in fnmatch.filter(filenames, 'packages.config'):
process_package(os.path.join(root, filename), delete_package)
#do_recursive('../../', 'NetLegacySupport.Action')
#do_recursive('../../', 'NetLegacySupport.ConcurrentDictionary')
#do_recursive('../../', 'NetLegacySupport.Tuple')
#do_recursive('../../', 'TypeAlias')
|
Add small utility for removing package references
|
Add small utility for removing package references
|
Python
|
mit
|
SaladLab/Akka.Interfaced,SaladbowlCreative/Akka.Interfaced,SaladbowlCreative/Akka.Interfaced,SaladLab/Akka.Interfaced
|
Add small utility for removing package references
|
import fnmatch
import os
import re
re_reference = re.compile(r'\<Reference Include=\"([\w\.]+)\,.*?\<\/Reference\>', re.DOTALL | re.MULTILINE)
def process_csproj(file, delete_package):
proj = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
proj_new = re_reference.sub(repl, proj)
if proj != proj_new:
print "!", file
open(file, "wb").write(proj_new)
re_package = re.compile(r'\<package id\="([\w\.]*)".*?\/\>')
def process_package(file, delete_package):
pack = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
pack_new = re_package.sub(repl, pack)
if pack != pack_new:
print "!", file
open(file, "wb").write(pack_new)
def do_recursive(start_path, delete_package):
projs = []
packages = []
    for root, dirnames, filenames in os.walk(start_path):
for filename in fnmatch.filter(filenames, '*.csproj'):
process_csproj(os.path.join(root, filename), delete_package)
for filename in fnmatch.filter(filenames, 'packages.config'):
process_package(os.path.join(root, filename), delete_package)
#do_recursive('../../', 'NetLegacySupport.Action')
#do_recursive('../../', 'NetLegacySupport.ConcurrentDictionary')
#do_recursive('../../', 'NetLegacySupport.Tuple')
#do_recursive('../../', 'TypeAlias')
|
<commit_before><commit_msg>Add small utility for removing package references<commit_after>
|
import fnmatch
import os
import re
re_reference = re.compile(r'\<Reference Include=\"([\w\.]+)\,.*?\<\/Reference\>', re.DOTALL | re.MULTILINE)
def process_csproj(file, delete_package):
proj = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
proj_new = re_reference.sub(repl, proj)
if proj != proj_new:
print "!", file
open(file, "wb").write(proj_new)
re_package = re.compile(r'\<package id\="([\w\.]*)".*?\/\>')
def process_package(file, delete_package):
pack = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
pack_new = re_package.sub(repl, pack)
if pack != pack_new:
print "!", file
open(file, "wb").write(pack_new)
def do_recursive(start_path, delete_package):
projs = []
packages = []
    for root, dirnames, filenames in os.walk(start_path):
for filename in fnmatch.filter(filenames, '*.csproj'):
process_csproj(os.path.join(root, filename), delete_package)
for filename in fnmatch.filter(filenames, 'packages.config'):
process_package(os.path.join(root, filename), delete_package)
#do_recursive('../../', 'NetLegacySupport.Action')
#do_recursive('../../', 'NetLegacySupport.ConcurrentDictionary')
#do_recursive('../../', 'NetLegacySupport.Tuple')
#do_recursive('../../', 'TypeAlias')
|
Add small utility for removing package referencesimport fnmatch
import os
import re
re_reference = re.compile(r'\<Reference Include=\"([\w\.]+)\,.*?\<\/Reference\>', re.DOTALL | re.MULTILINE)
def process_csproj(file, delete_package):
proj = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
proj_new = re_reference.sub(repl, proj)
if proj != proj_new:
print "!", file
open(file, "wb").write(proj_new)
re_package = re.compile(r'\<package id\="([\w\.]*)".*?\/\>')
def process_package(file, delete_package):
pack = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
pack_new = re_package.sub(repl, pack)
if pack != pack_new:
print "!", file
open(file, "wb").write(pack_new)
def do_recursive(start_path, delete_package):
projs = []
packages = []
    for root, dirnames, filenames in os.walk(start_path):
for filename in fnmatch.filter(filenames, '*.csproj'):
process_csproj(os.path.join(root, filename), delete_package)
for filename in fnmatch.filter(filenames, 'packages.config'):
process_package(os.path.join(root, filename), delete_package)
#do_recursive('../../', 'NetLegacySupport.Action')
#do_recursive('../../', 'NetLegacySupport.ConcurrentDictionary')
#do_recursive('../../', 'NetLegacySupport.Tuple')
#do_recursive('../../', 'TypeAlias')
|
<commit_before><commit_msg>Add small utility for removing package references<commit_after>import fnmatch
import os
import re
re_reference = re.compile(r'\<Reference Include=\"([\w\.]+)\,.*?\<\/Reference\>', re.DOTALL | re.MULTILINE)
def process_csproj(file, delete_package):
proj = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
proj_new = re_reference.sub(repl, proj)
if proj != proj_new:
print "!", file
open(file, "wb").write(proj_new)
re_package = re.compile(r'\<package id\="([\w\.]*)".*?\/\>')
def process_package(file, delete_package):
pack = open(file).read()
def repl(mo):
if mo.group(1) == delete_package:
return ""
else:
return mo.group(0)
pack_new = re_package.sub(repl, pack)
if pack != pack_new:
print "!", file
open(file, "wb").write(pack_new)
def do_recursive(start_path, delete_package):
projs = []
packages = []
    for root, dirnames, filenames in os.walk(start_path):
for filename in fnmatch.filter(filenames, '*.csproj'):
process_csproj(os.path.join(root, filename), delete_package)
for filename in fnmatch.filter(filenames, 'packages.config'):
process_package(os.path.join(root, filename), delete_package)
#do_recursive('../../', 'NetLegacySupport.Action')
#do_recursive('../../', 'NetLegacySupport.ConcurrentDictionary')
#do_recursive('../../', 'NetLegacySupport.Tuple')
#do_recursive('../../', 'TypeAlias')
|
|
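Usage is one call per package id; the commented lines at the bottom of the script show the intended invocations, e.g.:

if __name__ == "__main__":
    # Package ids mirror the commented examples; the walk starts from start_path.
    do_recursive('../../', 'NetLegacySupport.Action')
    do_recursive('../../', 'TypeAlias')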
f7fd11d657f10b2cb29000a29fcf839da6dea921
|
test/python/topology/test2_submission_params.py
|
test/python/topology/test2_submission_params.py
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
from __future__ import print_function
import unittest
import sys
import random
import itertools
from enum import IntEnum
import datetime
import decimal
import test_vers
from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import JobConfig
import streamsx.spl.op as op
from streamsx.spl.types import Timestamp
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestSubmissionParams(unittest.TestCase):
""" Test submission params.
"""
def setUp(self):
Tester.setup_distributed(self)
def test_spl(self):
"""
Test passing as an SPL parameter.
"""
N=22
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spTopic = topo.create_submission_parameter('mytopic')
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
p = op.Sink("com.ibm.streamsx.topology.topic::Publish", b.stream,
params={'topic': topic})
s = op.Source(topo, "com.ibm.streamsx.topology.topic::Subscribe", sch,
params = {'streamType': sch, 'topic': spTopic})
jc = JobConfig()
jc.submission_parameters['mytopic'] = topic
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s.stream, N)
tester.test(self.test_ctxtype, self.test_config)
|
Add initial Python submission parameter test
|
Add initial Python submission parameter test
|
Python
|
apache-2.0
|
ddebrunner/streamsx.topology,ddebrunner/streamsx.topology,IBMStreams/streamsx.topology,ddebrunner/streamsx.topology,IBMStreams/streamsx.topology,ddebrunner/streamsx.topology,IBMStreams/streamsx.topology,ddebrunner/streamsx.topology,ddebrunner/streamsx.topology,ddebrunner/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology,IBMStreams/streamsx.topology
|
Add initial Python submission parameter test
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
from __future__ import print_function
import unittest
import sys
import random
import itertools
from enum import IntEnum
import datetime
import decimal
import test_vers
from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import JobConfig
import streamsx.spl.op as op
from streamsx.spl.types import Timestamp
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestSubmissionParams(unittest.TestCase):
""" Test submission params.
"""
def setUp(self):
Tester.setup_distributed(self)
def test_spl(self):
"""
Test passing as an SPL parameter.
"""
N=22
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spTopic = topo.create_submission_parameter('mytopic')
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
p = op.Sink("com.ibm.streamsx.topology.topic::Publish", b.stream,
params={'topic': topic})
s = op.Source(topo, "com.ibm.streamsx.topology.topic::Subscribe", sch,
params = {'streamType': sch, 'topic': spTopic})
jc = JobConfig()
jc.submission_parameters['mytopic'] = topic
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s.stream, N)
tester.test(self.test_ctxtype, self.test_config)
|
<commit_before><commit_msg>Add initial Python submission parameter test<commit_after>
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
from __future__ import print_function
import unittest
import sys
import random
import itertools
from enum import IntEnum
import datetime
import decimal
import test_vers
from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import JobConfig
import streamsx.spl.op as op
from streamsx.spl.types import Timestamp
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestSubmissionParams(unittest.TestCase):
""" Test submission params.
"""
def setUp(self):
Tester.setup_distributed(self)
def test_spl(self):
"""
Test passing as an SPL parameter.
"""
N=22
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spTopic = topo.create_submission_parameter('mytopic')
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
p = op.Sink("com.ibm.streamsx.topology.topic::Publish", b.stream,
params={'topic': topic})
s = op.Source(topo, "com.ibm.streamsx.topology.topic::Subscribe", sch,
params = {'streamType': sch, 'topic': spTopic})
jc = JobConfig()
jc.submission_parameters['mytopic'] = topic
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s.stream, N)
tester.test(self.test_ctxtype, self.test_config)
|
Add initial Python submission parameter test# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
from __future__ import print_function
import unittest
import sys
import random
import itertools
from enum import IntEnum
import datetime
import decimal
import test_vers
from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import JobConfig
import streamsx.spl.op as op
from streamsx.spl.types import Timestamp
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestSubmissionParams(unittest.TestCase):
""" Test submission params.
"""
def setUp(self):
Tester.setup_distributed(self)
def test_spl(self):
"""
Test passing as an SPL parameter.
"""
N=22
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spTopic = topo.create_submission_parameter('mytopic')
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
p = op.Sink("com.ibm.streamsx.topology.topic::Publish", b.stream,
params={'topic': topic})
s = op.Source(topo, "com.ibm.streamsx.topology.topic::Subscribe", sch,
params = {'streamType': sch, 'topic': spTopic})
jc = JobConfig()
jc.submission_parameters['mytopic'] = topic
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s.stream, N)
tester.test(self.test_ctxtype, self.test_config)
|
<commit_before><commit_msg>Add initial Python submission parameter test<commit_after># coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
from __future__ import print_function
import unittest
import sys
import random
import itertools
from enum import IntEnum
import datetime
import decimal
import test_vers
from streamsx.topology.schema import StreamSchema
from streamsx.topology.topology import *
from streamsx.topology.tester import Tester
from streamsx.topology.context import JobConfig
import streamsx.spl.op as op
from streamsx.spl.types import Timestamp
@unittest.skipIf(not test_vers.tester_supported() , "tester not supported")
class TestSubmissionParams(unittest.TestCase):
""" Test submission params.
"""
def setUp(self):
Tester.setup_distributed(self)
def test_spl(self):
"""
Test passing as an SPL parameter.
"""
N=22
t = ''.join(random.choice('0123456789abcdef') for x in range(20))
topic = 'topology/test/python/' + t
topo = Topology()
spTopic = topo.create_submission_parameter('mytopic')
sch = StreamSchema('tuple<uint64 seq, rstring s>')
b = op.Source(topo, "spl.utility::Beacon", sch,
params = {'initDelay': 10.0, 'period': 0.02, 'iterations':N})
b.seq = b.output('IterationCount()')
p = op.Sink("com.ibm.streamsx.topology.topic::Publish", b.stream,
params={'topic': topic})
s = op.Source(topo, "com.ibm.streamsx.topology.topic::Subscribe", sch,
params = {'streamType': sch, 'topic': spTopic})
jc = JobConfig()
jc.submission_parameters['mytopic'] = topic
jc.add(self.test_config)
tester = Tester(topo)
tester.tuple_count(s.stream, N)
tester.test(self.test_ctxtype, self.test_config)
|
|
0fa5e9944d573d053633b1ea81497bc20598abee
|
CodeFights/longestWord.py
|
CodeFights/longestWord.py
|
#!/usr/local/bin/python
# Code Fights Longest Word Problem
import re
def longestWord(text):
m = re.findall(r'\b[a-z]+?\b', text, flags=re.I)
return max(m, key=len)
def main():
tests = [
["Ready, steady, go!", "steady"],
["Ready[[[, steady, go!", "steady"],
["ABCd", "ABCd"]
]
for t in tests:
res = longestWord(t[0])
ans = t[1]
if ans == res:
print("PASSED: longestWord({}) returned {}"
.format(t[0], res))
else:
print("FAILED: longestWord({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights longest word problem
|
Solve Code Fights longest word problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights longest word problem
|
#!/usr/local/bin/python
# Code Fights Longest Word Problem
import re
def longestWord(text):
m = re.findall(r'\b[a-z]+?\b', text, flags=re.I)
return max(m, key=len)
def main():
tests = [
["Ready, steady, go!", "steady"],
["Ready[[[, steady, go!", "steady"],
["ABCd", "ABCd"]
]
for t in tests:
res = longestWord(t[0])
ans = t[1]
if ans == res:
print("PASSED: longestWord({}) returned {}"
.format(t[0], res))
else:
print("FAILED: longestWord({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights longest word problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Longest Word Problem
import re
def longestWord(text):
m = re.findall(r'\b[a-z]+?\b', text, flags=re.I)
return max(m, key=len)
def main():
tests = [
["Ready, steady, go!", "steady"],
["Ready[[[, steady, go!", "steady"],
["ABCd", "ABCd"]
]
for t in tests:
res = longestWord(t[0])
ans = t[1]
if ans == res:
print("PASSED: longestWord({}) returned {}"
.format(t[0], res))
else:
print("FAILED: longestWord({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights longest word problem#!/usr/local/bin/python
# Code Fights Longest Word Problem
import re
def longestWord(text):
m = re.findall(r'\b[a-z]+?\b', text, flags=re.I)
return max(m, key=len)
def main():
tests = [
["Ready, steady, go!", "steady"],
["Ready[[[, steady, go!", "steady"],
["ABCd", "ABCd"]
]
for t in tests:
res = longestWord(t[0])
ans = t[1]
if ans == res:
print("PASSED: longestWord({}) returned {}"
.format(t[0], res))
else:
print("FAILED: longestWord({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights longest word problem<commit_after>#!/usr/local/bin/python
# Code Fights Longest Word Problem
import re
def longestWord(text):
m = re.findall(r'\b[a-z]+?\b', text, flags=re.I)
return max(m, key=len)
def main():
tests = [
["Ready, steady, go!", "steady"],
["Ready[[[, steady, go!", "steady"],
["ABCd", "ABCd"]
]
for t in tests:
res = longestWord(t[0])
ans = t[1]
if ans == res:
print("PASSED: longestWord({}) returned {}"
.format(t[0], res))
else:
print("FAILED: longestWord({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
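The lazy quantifier in r'\b[a-z]+?\b' behaves exactly like a greedy one here, since a word boundary can never fall inside a run of letters; an equivalent, more conventional spelling:

import re

def longest_word(text):
    # A maximal letter run is already a whole word, so no \b anchors are needed.
    return max(re.findall(r'[A-Za-z]+', text), key=len)

print(longest_word("Ready, steady, go!"))  # steady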
31b46f0ab99b945a97e3dd08d7e8d6a9a63ad75a
|
array/922.py
|
array/922.py
|
class Solution:
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
if not A:
return A
size = len(A)
even = 0
odd = 1
while even < size - 1 and odd < size:
while even < size - 1 and A[even] % 2 == 0:
even += 2
while odd < size and A[odd] % 2 == 1:
odd += 2
if even < size - 1 and odd < size:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
return A
|
Sort Array By Parity II
|
Sort Array By Parity II
|
Python
|
apache-2.0
|
MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode
|
Sort Array By Parity II
|
class Solution:
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
if not A:
return A
size = len(A)
even = 0
odd = 1
while even < size - 1 and odd < size:
while even < size - 1 and A[even] % 2 == 0:
even += 2
while odd < size and A[odd] % 2 == 1:
odd += 2
if even < size - 1 and odd < size:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
return A
|
<commit_before><commit_msg>Sort Array By Parity II<commit_after>
|
class Solution:
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
if not A:
return A
size = len(A)
even = 0
odd = 1
while even < size - 1 and odd < size:
while even < size - 1 and A[even] % 2 == 0:
even += 2
while odd < size and A[odd] % 2 == 1:
odd += 2
if even < size - 1 and odd < size:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
return A
|
Sort Array By Parity IIclass Solution:
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
if not A:
return A
size = len(A)
even = 0
odd = 1
while even < size - 1 and odd < size:
while even < size - 1 and A[even] % 2 == 0:
even += 2
while odd < size and A[odd] % 2 == 1:
odd += 2
if even < size - 1 and odd < size:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
return A
|
<commit_before><commit_msg>Sort Array By Parity II<commit_after>class Solution:
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
if not A:
return A
size = len(A)
even = 0
odd = 1
while even < size - 1 and odd < size:
while even < size - 1 and A[even] % 2 == 0:
even += 2
while odd < size and A[odd] % 2 == 1:
odd += 2
if even < size - 1 and odd < size:
temp = A[even]
A[even] = A[odd]
A[odd] = temp
return A
|
|
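An equivalent approach uses extended slice assignment instead of two cursors; it relies on the problem's guarantee that half the values are even and half are odd:

def sort_array_by_parity_ii(a):
    evens = [x for x in a if x % 2 == 0]  # collect before mutating a
    odds = [x for x in a if x % 2 == 1]
    a[::2] = evens    # evens onto even indices
    a[1::2] = odds    # odds onto odd indices
    return a

print(sort_array_by_parity_ii([4, 2, 5, 7]))  # [4, 5, 2, 7]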
eade3fa4f4d53574f359b9006b4d36b1bf428d49
|
tests/pluginregistry.py
|
tests/pluginregistry.py
|
#!/usr/bin/env python2
import Cura.PluginRegistry
p = Cura.PluginRegistry.PluginRegistry()
p.addPluginLocation("plugins")
p._populateMetaData()
#p.loadPlugin("ExamplePlugin")
print(p.getMetaData("ExamplePlugin"))
|
Add a tiny test application for testing the plugin registry
|
Add a tiny test application for testing the plugin registry
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
Add a tiny test application for testing the plugin registry
|
#!/usr/bin/env python2
import Cura.PluginRegistry
p = Cura.PluginRegistry.PluginRegistry()
p.addPluginLocation("plugins")
p._populateMetaData()
#p.loadPlugin("ExamplePlugin")
print(p.getMetaData("ExamplePlugin"))
|
<commit_before><commit_msg>Add a tiny test application for testing the plugin registry<commit_after>
|
#!/usr/bin/env python2
import Cura.PluginRegistry
p = Cura.PluginRegistry.PluginRegistry()
p.addPluginLocation("plugins")
p._populateMetaData()
#p.loadPlugin("ExamplePlugin")
print(p.getMetaData("ExamplePlugin"))
|
Add a tiny test application for testing the plugin registry#!/usr/bin/env python2
import Cura.PluginRegistry
p = Cura.PluginRegistry.PluginRegistry()
p.addPluginLocation("plugins")
p._populateMetaData()
#p.loadPlugin("ExamplePlugin")
print(p.getMetaData("ExamplePlugin"))
|
<commit_before><commit_msg>Add a tiny test application for testing the plugin registry<commit_after>#!/usr/bin/env python2
import Cura.PluginRegistry
p = Cura.PluginRegistry.PluginRegistry()
p.addPluginLocation("plugins")
p._populateMetaData()
#p.loadPlugin("ExamplePlugin")
print(p.getMetaData("ExamplePlugin"))
|
|
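The script is a manual smoke test; a sketch of the same check wrapped in unittest (assuming the same Cura.PluginRegistry API and a plugins/ directory beside the test):

import unittest

import Cura.PluginRegistry

class PluginRegistryTest(unittest.TestCase):
    def test_metadata_is_populated(self):
        p = Cura.PluginRegistry.PluginRegistry()
        p.addPluginLocation("plugins")
        p._populateMetaData()
        self.assertTrue(p.getMetaData("ExamplePlugin"))

if __name__ == '__main__':
    unittest.main()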
c6fbba9da9d1cf2a5a0007a56d192e267d19fcff
|
flexget/utils/database.py
|
flexget/utils/database.py
|
from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
passed_session = kwargs.get('session')
if not passed_session:
session = Session(autoflush=True)
try:
return func(*args, session=session, **kwargs)
finally:
session.close()
else:
return func(*args, **kwargs)
return wrapper
|
from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
if not kwargs.get('session'):
kwargs['session'] = Session(autoflush=True)
try:
return func(*args, **kwargs)
finally:
kwargs['session'].close()
else:
return func(*args, **kwargs)
return wrapper
|
Fix with_session decorator for python 2.5
|
Fix with_session decorator for python 2.5
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1957 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
Python
|
mit
|
vfrc2/Flexget,tobinjt/Flexget,Pretagonist/Flexget,oxc/Flexget,Danfocus/Flexget,jacobmetrick/Flexget,Flexget/Flexget,spencerjanssen/Flexget,ianstalk/Flexget,xfouloux/Flexget,malkavi/Flexget,asm0dey/Flexget,v17al/Flexget,camon/Flexget,ZefQ/Flexget,cvium/Flexget,jacobmetrick/Flexget,crawln45/Flexget,camon/Flexget,OmgOhnoes/Flexget,tvcsantos/Flexget,spencerjanssen/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,LynxyssCZ/Flexget,jacobmetrick/Flexget,patsissons/Flexget,JorisDeRieck/Flexget,patsissons/Flexget,qvazzler/Flexget,lildadou/Flexget,xfouloux/Flexget,sean797/Flexget,patsissons/Flexget,gazpachoking/Flexget,JorisDeRieck/Flexget,lildadou/Flexget,offbyone/Flexget,malkavi/Flexget,drwyrm/Flexget,OmgOhnoes/Flexget,jawilson/Flexget,v17al/Flexget,dsemi/Flexget,poulpito/Flexget,sean797/Flexget,jawilson/Flexget,antivirtel/Flexget,asm0dey/Flexget,antivirtel/Flexget,poulpito/Flexget,ZefQ/Flexget,antivirtel/Flexget,ianstalk/Flexget,qk4l/Flexget,LynxyssCZ/Flexget,ratoaq2/Flexget,crawln45/Flexget,drwyrm/Flexget,drwyrm/Flexget,ibrahimkarahan/Flexget,ZefQ/Flexget,lildadou/Flexget,oxc/Flexget,sean797/Flexget,ianstalk/Flexget,tarzasai/Flexget,malkavi/Flexget,tarzasai/Flexget,vfrc2/Flexget,crawln45/Flexget,tsnoam/Flexget,voriux/Flexget,qvazzler/Flexget,xfouloux/Flexget,spencerjanssen/Flexget,malkavi/Flexget,qk4l/Flexget,Danfocus/Flexget,ibrahimkarahan/Flexget,tobinjt/Flexget,dsemi/Flexget,OmgOhnoes/Flexget,v17al/Flexget,qvazzler/Flexget,Flexget/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,tsnoam/Flexget,dsemi/Flexget,tobinjt/Flexget,thalamus/Flexget,grrr2/Flexget,thalamus/Flexget,cvium/Flexget,tsnoam/Flexget,grrr2/Flexget,LynxyssCZ/Flexget,oxc/Flexget,Danfocus/Flexget,X-dark/Flexget,vfrc2/Flexget,tarzasai/Flexget,thalamus/Flexget,qk4l/Flexget,Pretagonist/Flexget,ratoaq2/Flexget,offbyone/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,ibrahimkarahan/Flexget,cvium/Flexget,poulpito/Flexget,crawln45/Flexget,offbyone/Flexget,X-dark/Flexget,grrr2/Flexget,Flexget/Flexget,Pretagonist/Flexget,Danfocus/Flexget,X-dark/Flexget,voriux/Flexget,jawilson/Flexget,tvcsantos/Flexget,ratoaq2/Flexget,tobinjt/Flexget,asm0dey/Flexget
|
from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
passed_session = kwargs.get('session')
if not passed_session:
session = Session(autoflush=True)
try:
return func(*args, session=session, **kwargs)
finally:
session.close()
else:
return func(*args, **kwargs)
return wrapper
Fix with_session decorator for python 2.5
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1957 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
if not kwargs.get('session'):
kwargs['session'] = Session(autoflush=True)
try:
return func(*args, **kwargs)
finally:
kwargs['session'].close()
else:
return func(*args, **kwargs)
return wrapper
|
<commit_before>from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
passed_session = kwargs.get('session')
if not passed_session:
session = Session(autoflush=True)
try:
return func(*args, session=session, **kwargs)
finally:
session.close()
else:
return func(*args, **kwargs)
return wrapper
<commit_msg>Fix with_session decorator for python 2.5
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1957 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>
|
from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
if not kwargs.get('session'):
kwargs['session'] = Session(autoflush=True)
try:
return func(*args, **kwargs)
finally:
kwargs['session'].close()
else:
return func(*args, **kwargs)
return wrapper
|
from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
passed_session = kwargs.get('session')
if not passed_session:
session = Session(autoflush=True)
try:
return func(*args, session=session, **kwargs)
finally:
session.close()
else:
return func(*args, **kwargs)
return wrapper
Fix with_session decorator for python 2.5
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1957 3942dd89-8c5d-46d7-aeed-044bccf3e60cfrom flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
if not kwargs.get('session'):
kwargs['session'] = Session(autoflush=True)
try:
return func(*args, **kwargs)
finally:
kwargs['session'].close()
else:
return func(*args, **kwargs)
return wrapper
|
<commit_before>from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
passed_session = kwargs.get('session')
if not passed_session:
session = Session(autoflush=True)
try:
return func(*args, session=session, **kwargs)
finally:
session.close()
else:
return func(*args, **kwargs)
return wrapper
<commit_msg>Fix with_session decorator for python 2.5
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1957 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>from flexget.manager import Session
def with_session(func):
""""Creates a session if one was not passed via keyword argument to the function"""
def wrapper(*args, **kwargs):
if not kwargs.get('session'):
kwargs['session'] = Session(autoflush=True)
try:
return func(*args, **kwargs)
finally:
kwargs['session'].close()
else:
return func(*args, **kwargs)
return wrapper
|
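Typical use, sketched with a hypothetical mapped class: decorate any helper that accepts an optional session keyword, and callers may either reuse their own session or let the decorator open and close one.

@with_session
def series_count(session=None):
    # 'Series' is a hypothetical SQLAlchemy-mapped class; the decorator
    # guarantees 'session' is a live session inside the function body.
    return session.query(Series).count()

print(series_count())                    # decorator opens and closes a session
print(series_count(session=my_session))  # reuses an existing session, left open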
544857a7a3cdb40fd793fed8e33694d551cc695f
|
pomodoro_calculator/main.py
|
pomodoro_calculator/main.py
|
"""Calculate the number of Pomodori available within a time period.
Usage:
get-pomodori [--from=<time>] [--break=<minutes>] [--long-break=<minutes>] <end-time>
get-pomodori (-h | --help | --version)
Options:
--version show program's version number and exit.
-h, --help show this help message and exit.
-f, --from=<time> calculate available pomodori from this time [default: now].
  -b, --break=<minutes>       the number of minutes between each pomodoro [default: 5].
  -l, --long-break=<minutes>  the number of minutes between every five pomodori [default: 15].
"""
from docopt import docopt
def main():
docopt(__doc__, version='0.2')
if __name__ == '__main__':
main()
|
Add initial version of the commandline tool
|
Add initial version of the commandline tool
|
Python
|
mit
|
Matt-Deacalion/Pomodoro-Calculator
|
Add initial version of the commandline tool
|
"""Calculate the number of Pomodori available within a time period.
Usage:
get-pomodori [--from=<time>] [--break=<minutes>] [--long-break=<minutes>] <end-time>
get-pomodori (-h | --help | --version)
Options:
--version show program's version number and exit.
-h, --help show this help message and exit.
-f, --from=<time> calculate available pomodori from this time [default: now].
  -b, --break=<minutes>       the number of minutes between each pomodoro [default: 5].
  -l, --long-break=<minutes>  the number of minutes between every five pomodori [default: 15].
"""
from docopt import docopt
def main():
docopt(__doc__, version='0.2')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add initial version of the commandline tool<commit_after>
|
"""Calculate the number of Pomodori available within a time period.
Usage:
get-pomodori [--from=<time>] [--break=<minutes>] [--long-break=<minutes>] <end-time>
get-pomodori (-h | --help | --version)
Options:
--version show program's version number and exit.
-h, --help show this help message and exit.
-f, --from=<time> calculate available pomodori from this time [default: now].
  -b, --break=<minutes>       the number of minutes between each pomodoro [default: 5].
  -l, --long-break=<minutes>  the number of minutes between every five pomodori [default: 15].
"""
from docopt import docopt
def main():
docopt(__doc__, version='0.2')
if __name__ == '__main__':
main()
|
Add initial version of the commandline tool"""Calculate the number of Pomodori available within a time period.
Usage:
get-pomodori [--from=<time>] [--break=<minutes>] [--long-break=<minutes>] <end-time>
get-pomodori (-h | --help | --version)
Options:
--version show program's version number and exit.
-h, --help show this help message and exit.
-f, --from=<time> calculate available pomodori from this time [default: now].
  -b, --break=<minutes>       the number of minutes between each pomodoro [default: 5].
  -l, --long-break=<minutes>  the number of minutes between every five pomodori [default: 15].
"""
from docopt import docopt
def main():
docopt(__doc__, version='0.2')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add initial version of the commandline tool<commit_after>"""Calculate the number of Pomodori available within a time period.
Usage:
get-pomodori [--from=<time>] [--break=<minutes>] [--long-break=<minutes>] <end-time>
get-pomodori (-h | --help | --version)
Options:
--version show program's version number and exit.
-h, --help show this help message and exit.
-f, --from=<time> calculate available pomodori from this time [default: now].
  -b, --break=<minutes>       the number of minutes between each pomodoro [default: 5].
  -l, --long-break=<minutes>  the number of minutes between every five pomodori [default: 15].
"""
from docopt import docopt
def main():
docopt(__doc__, version='0.2')
if __name__ == '__main__':
main()
|
|
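docopt returns a plain dict keyed by option and argument names; a quick sketch of what main() receives, using an explicit argv for illustration:

from docopt import docopt

args = docopt(__doc__, argv=['--break=4', '18:00'], version='0.2')
print(args['<end-time>'], args['--break'])
# args looks roughly like:
# {'--from': 'now', '--break': '4', '--long-break': '15',
#  '<end-time>': '18:00', '--help': False, '--version': False}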
08573c7c6eb20c7cf168d38a6566757b08fed6d9
|
python/opencv/opencv_2/videos/capture_video_from_camera.py
|
python/opencv/opencv_2/videos/capture_video_from_camera.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Capture video from camera: grab frames from a webcam and display them
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a NumPy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).
|
Add a snippet (Python OpenCV).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python OpenCV).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Capture video from camera: grab frames from a webcam and display them
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a NumPy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Capture video from camera: grab frames from a webcam and display them
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a NumPy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Capture video from camera: grab frames from a webcam and display them
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a NumPy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Capture video from camera: grab frames from a webcam and display them
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera
"""
from __future__ import print_function
import cv2 as cv
def main():
device_number = 0
video_capture = cv.VideoCapture(device_number)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a NumPy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
|
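The snippet in the record only displays frames. A short sketch of additionally writing the stream to disk with cv.VideoWriter; the MJPG codec, the 20 fps figure, and the OpenCV 3 property names are assumptions on top of the original code:

import cv2 as cv

def record(device_number=0, path='/tmp/capture.avi', fps=20.0):
    cap = cv.VideoCapture(device_number)
    width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
    writer = cv.VideoWriter(path, cv.VideoWriter_fourcc(*'MJPG'), fps,
                            (width, height))
    while True:
        ret, frame = cap.read()
        if not ret:  # camera unplugged or read failed
            break
        writer.write(frame)
        cv.imshow('recording', frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    writer.release()
    cv.destroyAllWindows()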
e0d631b4aab431c31689ccd7aa6ac92d95e32e80
|
tests/test_frontend.py
|
tests/test_frontend.py
|
import os
from tvrenamr.cli import helpers
from .base import BaseTest
class TestFrontEnd(BaseTest):
def setup(self):
super(TestFrontEnd, self).setup()
self.config = helpers.get_config()
def test_passing_current_dir_makes_file_list_a_list(self):
assert isinstance(helpers.build_file_list([self.files]), list)
def test_setting_recursive_adds_all_files_below_the_folder(self):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(self.files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(self.files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([self.files], recursive=True)
for root, dirs, files in os.walk(self.files):
for fn in files:
assert os.path.join(root, fn) in file_list
def test_ignoring_files(self):
ignore = self.random_files(self.files)
file_list = helpers.build_file_list([self.files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
|
import collections
import os
import sys
from tvrenamr.cli import helpers
from .utils import random_files
def test_passing_current_dir_makes_file_list_a_list(files):
file_list = helpers.build_file_list([files])
assert isinstance(file_list, collections.Iterable)
PY3 = sys.version_info[0] == 3
string_type = str if PY3 else basestring
text_type = str if PY3 else unicode
assert not isinstance(file_list, (string_type, text_type))
def test_setting_recursive_adds_all_files_below_the_folder(files):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([files], recursive=True)
for root, dirs, files in os.walk(files):
for fn in files:
assert (root, fn) in file_list
def test_ignoring_files(files):
ignore = random_files(files)
file_list = helpers.build_file_list([files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
|
Move to function only tests & fix test for generator based build_file_list
|
Move to function only tests & fix test for generator based build_file_list
build_file_list is a generator now so we need to make sure it returns an
iterable but not a string.
|
Python
|
mit
|
wintersandroid/tvrenamr,ghickman/tvrenamr
|
import os
from tvrenamr.cli import helpers
from .base import BaseTest
class TestFrontEnd(BaseTest):
def setup(self):
super(TestFrontEnd, self).setup()
self.config = helpers.get_config()
def test_passing_current_dir_makes_file_list_a_list(self):
assert isinstance(helpers.build_file_list([self.files]), list)
def test_setting_recursive_adds_all_files_below_the_folder(self):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(self.files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(self.files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([self.files], recursive=True)
for root, dirs, files in os.walk(self.files):
for fn in files:
assert os.path.join(root, fn) in file_list
def test_ignoring_files(self):
ignore = self.random_files(self.files)
file_list = helpers.build_file_list([self.files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
Move to function only tests & fix test for generator based build_file_list
build_file_list is a generator now so we need to make sure it returns an
iterable but not a string.
|
import collections
import os
import sys
from tvrenamr.cli import helpers
from .utils import random_files
def test_passing_current_dir_makes_file_list_a_list(files):
file_list = helpers.build_file_list([files])
assert isinstance(file_list, collections.Iterable)
PY3 = sys.version_info[0] == 3
string_type = str if PY3 else basestring
text_type = str if PY3 else unicode
assert not isinstance(file_list, (string_type, text_type))
def test_setting_recursive_adds_all_files_below_the_folder(files):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([files], recursive=True)
for root, dirs, files in os.walk(files):
for fn in files:
assert (root, fn) in file_list
def test_ignoring_files(files):
ignore = random_files(files)
file_list = helpers.build_file_list([files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
|
<commit_before>import os
from tvrenamr.cli import helpers
from .base import BaseTest
class TestFrontEnd(BaseTest):
def setup(self):
super(TestFrontEnd, self).setup()
self.config = helpers.get_config()
def test_passing_current_dir_makes_file_list_a_list(self):
assert isinstance(helpers.build_file_list([self.files]), list)
def test_setting_recursive_adds_all_files_below_the_folder(self):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(self.files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(self.files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([self.files], recursive=True)
for root, dirs, files in os.walk(self.files):
for fn in files:
assert os.path.join(root, fn) in file_list
def test_ignoring_files(self):
ignore = self.random_files(self.files)
file_list = helpers.build_file_list([self.files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
<commit_msg>Move to function only tests & fix test for generator based build_file_list
build_file_list is a generator now so we need to make sure it returns an
iterable but not a string.<commit_after>
|
import collections
import os
import sys
from tvrenamr.cli import helpers
from .utils import random_files
def test_passing_current_dir_makes_file_list_a_list(files):
file_list = helpers.build_file_list([files])
assert isinstance(file_list, collections.Iterable)
PY3 = sys.version_info[0] == 3
string_type = str if PY3 else basestring
text_type = str if PY3 else unicode
assert not isinstance(file_list, (string_type, text_type))
def test_setting_recursive_adds_all_files_below_the_folder(files):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([files], recursive=True)
for root, dirs, files in os.walk(files):
for fn in files:
assert (root, fn) in file_list
def test_ignoring_files(files):
ignore = random_files(files)
file_list = helpers.build_file_list([files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
|
import os
from tvrenamr.cli import helpers
from .base import BaseTest
class TestFrontEnd(BaseTest):
def setup(self):
super(TestFrontEnd, self).setup()
self.config = helpers.get_config()
def test_passing_current_dir_makes_file_list_a_list(self):
assert isinstance(helpers.build_file_list([self.files]), list)
def test_setting_recursive_adds_all_files_below_the_folder(self):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(self.files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(self.files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([self.files], recursive=True)
for root, dirs, files in os.walk(self.files):
for fn in files:
assert os.path.join(root, fn) in file_list
def test_ignoring_files(self):
ignore = self.random_files(self.files)
file_list = helpers.build_file_list([self.files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
Move to function only tests & fix test for generator based build_file_list
build_file_list is a generator now so we need to make sure it returns an
iterable but not a string.import collections
import os
import sys
from tvrenamr.cli import helpers
from .utils import random_files
def test_passing_current_dir_makes_file_list_a_list(files):
file_list = helpers.build_file_list([files])
assert isinstance(file_list, collections.Iterable)
PY3 = sys.version_info[0] == 3
string_type = str if PY3 else basestring
text_type = str if PY3 else unicode
assert not isinstance(file_list, (string_type, text_type))
def test_setting_recursive_adds_all_files_below_the_folder(files):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([files], recursive=True)
for root, dirs, files in os.walk(files):
for fn in files:
assert (root, fn) in file_list
def test_ignoring_files(files):
ignore = random_files(files)
file_list = helpers.build_file_list([files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
|
<commit_before>import os
from tvrenamr.cli import helpers
from .base import BaseTest
class TestFrontEnd(BaseTest):
def setup(self):
super(TestFrontEnd, self).setup()
self.config = helpers.get_config()
def test_passing_current_dir_makes_file_list_a_list(self):
assert isinstance(helpers.build_file_list([self.files]), list)
def test_setting_recursive_adds_all_files_below_the_folder(self):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(self.files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(self.files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([self.files], recursive=True)
for root, dirs, files in os.walk(self.files):
for fn in files:
assert os.path.join(root, fn) in file_list
def test_ignoring_files(self):
ignore = self.random_files(self.files)
file_list = helpers.build_file_list([self.files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
<commit_msg>Move to function only tests & fix test for generator based build_file_list
build_file_list is a generator now so we need to make sure it returns an
iterable but not a string.<commit_after>import collections
import os
import sys
from tvrenamr.cli import helpers
from .utils import random_files
def test_passing_current_dir_makes_file_list_a_list(files):
file_list = helpers.build_file_list([files])
assert isinstance(file_list, collections.Iterable)
PY3 = sys.version_info[0] == 3
string_type = str if PY3 else basestring
text_type = str if PY3 else unicode
assert not isinstance(file_list, (string_type, text_type))
def test_setting_recursive_adds_all_files_below_the_folder(files):
new_folders = ('herp', 'derp', 'test')
os.makedirs(os.path.join(files, *new_folders))
def build_folder(folder):
new_files = ('foo', 'bar', 'blah')
for fn in new_files:
with open(os.path.join(files, folder, fn), 'w') as f:
f.write('')
build_folder('herp')
build_folder('herp/derp')
build_folder('herp/derp/test')
file_list = helpers.build_file_list([files], recursive=True)
for root, dirs, files in os.walk(files):
for fn in files:
assert (root, fn) in file_list
def test_ignoring_files(files):
ignore = random_files(files)
file_list = helpers.build_file_list([files], ignore_filelist=ignore)
assert all(fn not in file_list for fn in ignore)
|
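The rewritten tests above treat build_file_list as a lazy, non-string iterable of (root, filename) pairs. An illustrative generator with that shape -- an assumption for illustration only, not the real tvrenamr.cli.helpers code:

import os

def build_file_list(paths, recursive=False, ignore_filelist=()):
    """Yield (root, filename) pairs for every file under the given paths."""
    for path in paths:
        if os.path.isfile(path):
            root, fn = os.path.split(path)
            if fn not in ignore_filelist:
                yield root, fn
            continue
        for root, dirs, files in os.walk(path):
            for fn in files:
                if fn not in ignore_filelist:
                    yield root, fn
            if not recursive:
                break  # only the top level unless asked to recurse

Because a generator is exhausted by its first full scan, repeated membership checks like those in the tests are safest against a materialized copy, e.g. file_list = set(build_file_list([path], recursive=True)).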
d47d7931f5531c4fe28598d15c305592e446af2b
|
tests/test_settings.py
|
tests/test_settings.py
|
def test_settings_group(plex):
assert plex.settings.group('general')
def test_settings_get(plex):
# This is the value since we haven't set any friendlyname;
# plex just defaults to the computer name, but it is NOT in the settings.
assert plex.settings.get('FriendlyName').value == ''
def test_settings_set(plex):
cd = plex.settings.get('collectUsageData')
cd.set(False)
# Save works, but since we reload asap the data isn't changed,
# or it might be our caching that does this. ## TODO
plex.settings.save()
|
Add some test for settings. TODO fix save.
|
Add some test for settings. TODO fix save.
|
Python
|
bsd-3-clause
|
pkkid/python-plexapi,mjs7231/python-plexapi
|
Add some test for settings. TODO fix save.
|
def test_settings_group(plex):
assert plex.settings.group('general')
def test_settings_get(plex):
# This is the value since we haven't set any friendlyname;
# plex just defaults to the computer name, but it is NOT in the settings.
assert plex.settings.get('FriendlyName').value == ''
def test_settings_set(plex):
cd = plex.settings.get('collectUsageData')
cd.set(False)
# Save works, but since we reload asap the data isn't changed,
# or it might be our caching that does this. ## TODO
plex.settings.save()
|
<commit_before><commit_msg>Add some test for settings. TODO fix save.<commit_after>
|
def test_settings_group(plex):
assert plex.settings.group('general')
def test_settings_get(plex):
# This is the value since we haven't set any friendlyname;
# plex just defaults to the computer name, but it is NOT in the settings.
assert plex.settings.get('FriendlyName').value == ''
def test_settings_set(plex):
cd = plex.settings.get('collectUsageData')
cd.set(False)
# Save works, but since we reload asap the data isn't changed,
# or it might be our caching that does this. ## TODO
plex.settings.save()
|
Add some test for settings. TODO fix save.
def test_settings_group(plex):
assert plex.settings.group('general')
def test_settings_get(plex):
# This is the value since we haven't set any friendlyname;
# plex just defaults to the computer name, but it is NOT in the settings.
assert plex.settings.get('FriendlyName').value == ''
def test_settings_set(plex):
cd = plex.settings.get('collectUsageData')
cd.set(False)
# Save works, but since we reload asap the data isn't changed,
# or it might be our caching that does this. ## TODO
plex.settings.save()
|
<commit_before><commit_msg>Add some test for settings. TODO fix save.<commit_after>
def test_settings_group(plex):
assert plex.settings.group('general')
def test_settings_get(plex):
# This is the value since we haven't set any friendlyname;
# plex just defaults to the computer name, but it is NOT in the settings.
assert plex.settings.get('FriendlyName').value == ''
def test_settings_set(plex):
cd = plex.settings.get('collectUsageData')
cd.set(False)
# Save works, but since we reload asap the data isn't changed,
# or it might be our caching that does this. ## TODO
plex.settings.save()
|
|
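The TODO in the record observes that a value set and saved still reads back unchanged. A sketch of the round trip the test is driving at; only get/set/save appear in the commit, so the settings.reload() call is an assumed method name:

def toggle_usage_data(plex):
    setting = plex.settings.get('collectUsageData')
    before = setting.value
    setting.set(not before)
    plex.settings.save()
    plex.settings.reload()  # assumed: re-fetch state from the server
    after = plex.settings.get('collectUsageData').value
    assert after != before, 'save did not stick (the TODO in the commit)'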
0db38995d9cb7733d5dcc7bd88234c4a356fc9fb
|
util/merge_coop_nwsli.py
|
util/merge_coop_nwsli.py
|
# Need to check the log files for shef parser and create new sites
# within database based on what we find
import re, iemdb, os
MESOSITE = iemdb.connect('mesosite', bypass=False)
# Load up our database
sites = {}
for line in open('coop_nwsli.txt'):
tokens = line.split("|")
if len(tokens) < 9:
continue
sites[ tokens[0] ] = {'name': tokens[4],
'lat': tokens[6],
'lon': tokens[7],
'state': tokens[8],
'skip': False,
}
# Look for sites
for line in open('/home/ldm/logs/shef_parser.log'):
tokens = re.findall("stationID: ([A-Z1-9]{5}) ", line)
if len(tokens) == 0:
continue
nwsli = tokens[0]
if not sites.has_key(nwsli):
print 'MISSING %s' % (nwsli,)
sites[nwsli] = {'skip': True}
continue
if sites[nwsli]['skip']:
continue
sites[nwsli]['skip'] = True
# Now, we insert
mcursor = MESOSITE.cursor()
gtxt = 'SRID=4326;POINT(%s %s)' % (sites[nwsli]['lon'], sites[nwsli]['lat'])
try:
mcursor.execute("""
INSERT into stations(id, name, state, country, network, online, geom) VALUES
(%s, %s, %s, 'US', %s, 't', %s)
""", (nwsli, sites[nwsli]['name'], sites[nwsli]['state'],
'%s_COOP' % (sites[nwsli]['state'],), gtxt))
mcursor.close()
except:
pass
cmd = "/usr/bin/env python /var/www/scripts/util/addSiteMesosite.py %s_COOP %s" % (sites[nwsli]['state'], nwsli)
os.system(cmd)
MESOSITE.commit()
|
Add tool for adding COOP IDs
|
Add tool for adding COOP IDs
|
Python
|
mit
|
akrherz/pyWWA,akrherz/pyWWA
|
Add tool for adding COOP IDs
|
# Need to check the log files for shef parser and create new sites
# within database based on what we find
import re, iemdb, os
MESOSITE = iemdb.connect('mesosite', bypass=False)
# Load up our database
sites = {}
for line in open('coop_nwsli.txt'):
tokens = line.split("|")
if len(tokens) < 9:
continue
sites[ tokens[0] ] = {'name': tokens[4],
'lat': tokens[6],
'lon': tokens[7],
'state': tokens[8],
'skip': False,
}
# Look for sites
for line in open('/home/ldm/logs/shef_parser.log'):
tokens = re.findall("stationID: ([A-Z1-9]{5}) ", line)
if len(tokens) == 0:
continue
nwsli = tokens[0]
if not sites.has_key(nwsli):
print 'MISSING %s' % (nwsli,)
sites[nwsli] = {'skip': True}
continue
if sites[nwsli]['skip']:
continue
sites[nwsli]['skip'] = True
# Now, we insert
mcursor = MESOSITE.cursor()
gtxt = 'SRID=4326;POINT(%s %s)' % (sites[nwsli]['lon'], sites[nwsli]['lat'])
try:
mcursor.execute("""
INSERT into stations(id, name, state, country, network, online, geom) VALUES
(%s, %s, %s, 'US', %s, 't', %s)
""", (nwsli, sites[nwsli]['name'], sites[nwsli]['state'],
'%s_COOP' % (sites[nwsli]['state'],), gtxt))
mcursor.close()
except:
pass
cmd = "/usr/bin/env python /var/www/scripts/util/addSiteMesosite.py %s_COOP %s" % (sites[nwsli]['state'], nwsli)
os.system(cmd)
MESOSITE.commit()
|
<commit_before><commit_msg>Add tool for adding COOP IDs<commit_after>
|
# Need to check the log files for shef parser and create new sites
# within database based on what we find
import re, iemdb, os
MESOSITE = iemdb.connect('mesosite', bypass=False)
# Load up our database
sites = {}
for line in open('coop_nwsli.txt'):
tokens = line.split("|")
if len(tokens) < 9:
continue
sites[ tokens[0] ] = {'name': tokens[4],
'lat': tokens[6],
'lon': tokens[7],
'state': tokens[8],
'skip': False,
}
# Look for sites
for line in open('/home/ldm/logs/shef_parser.log'):
tokens = re.findall("stationID: ([A-Z1-9]{5}) ", line)
if len(tokens) == 0:
continue
nwsli = tokens[0]
if not sites.has_key(nwsli):
print 'MISSING %s' % (nwsli,)
sites[nwsli] = {'skip': True}
continue
if sites[nwsli]['skip']:
continue
sites[nwsli]['skip'] = True
# Now, we insert
mcursor = MESOSITE.cursor()
gtxt = 'SRID=4326;POINT(%s %s)' % (sites[nwsli]['lon'], sites[nwsli]['lat'])
try:
mcursor.execute("""
INSERT into stations(id, name, state, country, network, online, geom) VALUES
(%s, %s, %s, 'US', %s, 't', %s)
""", (nwsli, sites[nwsli]['name'], sites[nwsli]['state'],
'%s_COOP' % (sites[nwsli]['state'],), gtxt))
mcursor.close()
except:
pass
cmd = "/usr/bin/env python /var/www/scripts/util/addSiteMesosite.py %s_COOP %s" % (sites[nwsli]['state'], nwsli)
os.system(cmd)
MESOSITE.commit()
|
Add tool for adding COOP IDs# Need to check the log files for shef parser and create new sites
# within database based on what we find
import re, iemdb, os
MESOSITE = iemdb.connect('mesosite', bypass=False)
# Load up our database
sites = {}
for line in open('coop_nwsli.txt'):
tokens = line.split("|")
if len(tokens) < 9:
continue
sites[ tokens[0] ] = {'name': tokens[4],
'lat': tokens[6],
'lon': tokens[7],
'state': tokens[8],
'skip': False,
}
# Look for sites
for line in open('/home/ldm/logs/shef_parser.log'):
tokens = re.findall("stationID: ([A-Z1-9]{5}) ", line)
if len(tokens) == 0:
continue
nwsli = tokens[0]
if not sites.has_key(nwsli):
print 'MISSING %s' % (nwsli,)
sites[nwsli] = {'skip': True}
continue
if sites[nwsli]['skip']:
continue
sites[nwsli]['skip'] = True
# Now, we insert
mcursor = MESOSITE.cursor()
gtxt = 'SRID=4326;POINT(%s %s)' % (sites[nwsli]['lon'], sites[nwsli]['lat'])
try:
mcursor.execute("""
INSERT into stations(id, name, state, country, network, online, geom) VALUES
(%s, %s, %s, 'US', %s, 't', %s)
""", (nwsli, sites[nwsli]['name'], sites[nwsli]['state'],
'%s_COOP' % (sites[nwsli]['state'],), gtxt))
mcursor.close()
except:
pass
cmd = "/usr/bin/env python /var/www/scripts/util/addSiteMesosite.py %s_COOP %s" % (sites[nwsli]['state'], nwsli)
os.system(cmd)
MESOSITE.commit()
|
<commit_before><commit_msg>Add tool for adding COOP IDs<commit_after># Need to check the log files for shef parser and create new sites
# within database based on what we find
import re, iemdb, os
MESOSITE = iemdb.connect('mesosite', bypass=False)
# Load up our database
sites = {}
for line in open('coop_nwsli.txt'):
tokens = line.split("|")
if len(tokens) < 9:
continue
sites[ tokens[0] ] = {'name': tokens[4],
'lat': tokens[6],
'lon': tokens[7],
'state': tokens[8],
'skip': False,
}
# Look for sites
for line in open('/home/ldm/logs/shef_parser.log'):
tokens = re.findall("stationID: ([A-Z1-9]{5}) ", line)
if len(tokens) == 0:
continue
nwsli = tokens[0]
if not sites.has_key(nwsli):
print 'MISSING %s' % (nwsli,)
sites[nwsli] = {'skip': True}
continue
if sites[nwsli]['skip']:
continue
sites[nwsli]['skip'] = True
# Now, we insert
mcursor = MESOSITE.cursor()
gtxt = 'SRID=4326;POINT(%s %s)' % (sites[nwsli]['lon'], sites[nwsli]['lat'])
try:
mcursor.execute("""
INSERT into stations(id, name, state, country, network, online, geom) VALUES
(%s, %s, %s, 'US', %s, 't', %s)
""", (nwsli, sites[nwsli]['name'], sites[nwsli]['state'],
'%s_COOP' % (sites[nwsli]['state'],), gtxt))
mcursor.close()
except:
pass
cmd = "/usr/bin/env python /var/www/scripts/util/addSiteMesosite.py %s_COOP %s" % (sites[nwsli]['state'], nwsli)
os.system(cmd)
MESOSITE.commit()
|
|
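The hand-rolled split("|") in the script can also be written with the csv module, which copes with quoting if it ever appears; the column meanings below are taken directly from the indices the script uses:

import csv

def load_sites(path='coop_nwsli.txt'):
    """Map NWSLI identifier -> site metadata from the pipe-delimited file."""
    sites = {}
    with open(path) as fh:
        for tokens in csv.reader(fh, delimiter='|'):
            if len(tokens) < 9:
                continue
            sites[tokens[0]] = {
                'name': tokens[4],
                'lat': tokens[6],
                'lon': tokens[7],
                'state': tokens[8],
                'skip': False,
            }
    return sites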
fd9547f088b2ed15c289235c329be04590689855
|
host-test/stateless-test.py
|
host-test/stateless-test.py
|
import json
import subprocess
import struct
import sys
import unittest
import uuid
exe = None
# The protocol datagram is described here:
# https://developer.chrome.com/extensions/nativeMessaging#native-messaging-host-protocol
#
# The protocol itself is described here:
# https://github.com/open-eid/chrome-token-signing/wiki/NativeMessagingAPI
class TestSequenceFunctions(unittest.TestCase):
def tranceive(self, msg):
# send as described in the framing protocol linked above
print "SEND: %s" % msg
self.p.stdin.write(struct.pack("=I", len(msg)))
self.p.stdin.write(msg)
# now read the input
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
response = str(self.p.stdout.read(response_length))
# make it into "oneline" json before printing
response_print = json.dumps(json.loads(response))
print "RECV: %s" % response_print
return json.loads(response)
def complete_msg(self, msg):
msg["nonce"] = str(uuid.uuid4())
msg["lang"] = "eng"
msg["protocol"] = "https:"
return msg
def setUp(self):
global exe
self.p = subprocess.Popen(exe, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True, stderr=None)
print "Running native component on PID %d" % self.p.pid
def test_random_string(self):
cmd = "BLAH"
self.tranceive(cmd)
def test_utopic_length(self):
self.p.stdin.write(struct.pack("=I", 0xFFFFFFFE))
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
def test_nonce_echo(self):
cmd = self.complete_msg({"type": "VERSION"})
original_nonce = cmd["nonce"]
resp = self.tranceive(json.dumps(cmd))
self.assertEqual(resp["nonce"], original_nonce)
def test_version(self):
cmd = json.dumps(self.complete_msg({"type":"VERSION"}))
resp = self.tranceive(cmd)
self.assertEqual(resp["version"], "LOCAL_BUILD")
def test_get_certificate_cancel(self):
print "PRESS CANCEL IN THE DIALOG"
cmd = json.dumps(self.complete_msg({"type":"CERT"}))
resp = self.tranceive(cmd)
if __name__ == '__main__':
if len(sys.argv) > 1:
exe = sys.argv[1]
# remove argument so that unittest.main() would work as expected
sys.argv = [sys.argv[0]]
else:
print "usage: stateless-test.py <path to executable>"
sys.exit(1)
# run tests
unittest.main()
|
Add foundation for native component testing
|
Add foundation for native component testing
|
Python
|
lgpl-2.1
|
metsma/chrome-token-signing,cristiano-andrade/chrome-token-signing,fabiorusso/chrome-token-signing,open-eid/chrome-token-signing,open-eid/chrome-token-signing,fabiorusso/chrome-token-signing,open-eid/chrome-token-signing,open-eid/chrome-token-signing,metsma/chrome-token-signing,fabiorusso/chrome-token-signing,metsma/chrome-token-signing,metsma/chrome-token-signing,cristiano-andrade/chrome-token-signing,cristiano-andrade/chrome-token-signing,fabiorusso/chrome-token-signing,open-eid/chrome-token-signing,cristiano-andrade/chrome-token-signing,metsma/chrome-token-signing
|
Add foundation for native component testing
|
import json
import subprocess
import struct
import sys
import unittest
import uuid
exe = None
# The protocol datagram is described here:
# https://developer.chrome.com/extensions/nativeMessaging#native-messaging-host-protocol
#
# The protocol itself is described here:
# https://github.com/open-eid/chrome-token-signing/wiki/NativeMessagingAPI
class TestSequenceFunctions(unittest.TestCase):
def tranceive(self, msg):
# send as described in the framing protocol linked above
print "SEND: %s" % msg
self.p.stdin.write(struct.pack("=I", len(msg)))
self.p.stdin.write(msg)
# now read the input
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
response = str(self.p.stdout.read(response_length))
# make it into "oneline" json before printing
response_print = json.dumps(json.loads(response))
print "RECV: %s" % response_print
return json.loads(response)
def complete_msg(self, msg):
msg["nonce"] = str(uuid.uuid4())
msg["lang"] = "eng"
msg["protocol"] = "https:"
return msg
def setUp(self):
global exe
self.p = subprocess.Popen(exe, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True, stderr=None)
print "Running native component on PID %d" % self.p.pid
def test_random_string(self):
cmd = "BLAH"
self.tranceive(cmd)
def test_utopic_length(self):
self.p.stdin.write(struct.pack("=I", 0xFFFFFFFE))
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
def test_nonce_echo(self):
cmd = self.complete_msg({"type": "VERSION"})
original_nonce = cmd["nonce"]
resp = self.tranceive(json.dumps(cmd))
self.assertEqual(resp["nonce"], original_nonce)
def test_version(self):
cmd = json.dumps(self.complete_msg({"type":"VERSION"}))
resp = self.tranceive(cmd)
self.assertEqual(resp["version"], "LOCAL_BUILD")
def test_get_certificate_cancel(self):
print "PRESS CANCEL IN THE DIALOG"
cmd = json.dumps(self.complete_msg({"type":"CERT"}))
resp = self.tranceive(cmd)
if __name__ == '__main__':
if len(sys.argv) > 1:
exe = sys.argv[1]
# remove argument so that unittest.main() would work as expected
sys.argv = [sys.argv[0]]
else:
print "usage: stateless-test.py <path to executable>"
sys.exit(1)
# run tests
unittest.main()
|
<commit_before><commit_msg>Add foundation for native component testing<commit_after>
|
import json
import subprocess
import struct
import sys
import unittest
import uuid
exe = None
# The protocol datagram is described here:
# https://developer.chrome.com/extensions/nativeMessaging#native-messaging-host-protocol
#
# The protocol itself is described here:
# https://github.com/open-eid/chrome-token-signing/wiki/NativeMessagingAPI
class TestSequenceFunctions(unittest.TestCase):
def tranceive(self, msg):
# send as described in the framing protocol linked above
print "SEND: %s" % msg
self.p.stdin.write(struct.pack("=I", len(msg)))
self.p.stdin.write(msg)
# now read the input
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
response = str(self.p.stdout.read(response_length))
# make it into "oneline" json before printing
response_print = json.dumps(json.loads(response))
print "RECV: %s" % response_print
return json.loads(response)
def complete_msg(self, msg):
msg["nonce"] = str(uuid.uuid4())
msg["lang"] = "eng"
msg["protocol"] = "https:"
return msg
def setUp(self):
global exe
self.p = subprocess.Popen(exe, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True, stderr=None)
print "Running native component on PID %d" % self.p.pid
def test_random_string(self):
cmd = "BLAH"
self.tranceive(cmd)
def test_utopic_length(self):
self.p.stdin.write(struct.pack("=I", 0xFFFFFFFE))
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
def test_nonce_echo(self):
cmd = self.complete_msg({"type": "VERSION"})
original_nonce = cmd["nonce"]
resp = self.tranceive(json.dumps(cmd))
self.assertEqual(resp["nonce"], original_nonce)
def test_version(self):
cmd = json.dumps(self.complete_msg({"type":"VERSION"}))
resp = self.tranceive(cmd)
self.assertEqual(resp["version"], "LOCAL_BUILD")
def test_get_certificate_cancel(self):
print "PRESS CANCEL IN THE DIALOG"
cmd = json.dumps(self.complete_msg({"type":"CERT"}))
resp = self.tranceive(cmd)
if __name__ == '__main__':
if len(sys.argv) > 1:
exe = sys.argv[1]
# remove argument so that unittest.main() would work as expected
sys.argv = [sys.argv[0]]
else:
print "usage: stateless-test.py <path to executable>"
sys.exit(1)
# run tests
unittest.main()
|
Add foundation for native component testingimport json
import subprocess
import struct
import sys
import unittest
import uuid
exe = None
# The protocol datagram is described here:
# https://developer.chrome.com/extensions/nativeMessaging#native-messaging-host-protocol
#
# The protocol itself is described here:
# https://github.com/open-eid/chrome-token-signing/wiki/NativeMessagingAPI
class TestSequenceFunctions(unittest.TestCase):
def tranceive(self, msg):
# send as described in the framing protocol linked above
print "SEND: %s" % msg
self.p.stdin.write(struct.pack("=I", len(msg)))
self.p.stdin.write(msg)
# now read the input
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
response = str(self.p.stdout.read(response_length))
# make it into "oneline" json before printing
response_print = json.dumps(json.loads(response))
print "RECV: %s" % response_print
return json.loads(response)
def complete_msg(self, msg):
msg["nonce"] = str(uuid.uuid4())
msg["lang"] = "eng"
msg["protocol"] = "https:"
return msg
def setUp(self):
global exe
self.p = subprocess.Popen(exe, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True, stderr=None)
print "Running native component on PID %d" % self.p.pid
def test_random_string(self):
cmd = "BLAH"
self.tranceive(cmd)
def test_utopic_length(self):
self.p.stdin.write(struct.pack("=I", 0xFFFFFFFE))
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
def test_nonce_echo(self):
cmd = self.complete_msg({"type": "VERSION"})
original_nonce = cmd["nonce"]
resp = self.tranceive(json.dumps(cmd))
self.assertEqual(resp["nonce"], original_nonce)
def test_version(self):
cmd = json.dumps(self.complete_msg({"type":"VERSION"}))
resp = self.tranceive(cmd)
self.assertEqual(resp["version"], "LOCAL_BUILD")
def test_get_certificate_cancel(self):
print "PRESS CANCEL IN THE DIALOG"
cmd = json.dumps(self.complete_msg({"type":"CERT"}))
resp = self.tranceive(cmd)
if __name__ == '__main__':
if len(sys.argv) > 1:
exe = sys.argv[1]
# remove argument so that unittest.main() would work as expected
sys.argv = [sys.argv[0]]
else:
print "usage: stateless-test.py <path to executable>"
sys.exit(1)
# run tests
unittest.main()
|
<commit_before><commit_msg>Add foundation for native component testing<commit_after>import json
import subprocess
import struct
import sys
import unittest
import uuid
exe = None
# The protocol datagram is described here:
# https://developer.chrome.com/extensions/nativeMessaging#native-messaging-host-protocol
#
# The protocol itself is described here:
# https://github.com/open-eid/chrome-token-signing/wiki/NativeMessagingAPI
class TestSequenceFunctions(unittest.TestCase):
def tranceive(self, msg):
# send as described in the framing protocol linked above
print "SEND: %s" % msg
self.p.stdin.write(struct.pack("=I", len(msg)))
self.p.stdin.write(msg)
# now read the input
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
response = str(self.p.stdout.read(response_length))
# make it into "oneline" json before printing
response_print = json.dumps(json.loads(response))
print "RECV: %s" % response_print
return json.loads(response)
def complete_msg(self, msg):
msg["nonce"] = str(uuid.uuid4())
msg["lang"] = "eng"
msg["protocol"] = "https:"
return msg
def setUp(self):
global exe
self.p = subprocess.Popen(exe, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True, stderr=None)
print "Running native component on PID %d" % self.p.pid
def test_random_string(self):
cmd = "BLAH"
self.tranceive(cmd)
def test_utopic_length(self):
self.p.stdin.write(struct.pack("=I", 0xFFFFFFFE))
response_length = struct.unpack("=I", self.p.stdout.read(4))[0]
def test_nonce_echo(self):
cmd = self.complete_msg({"type": "VERSION"})
original_nonce = cmd["nonce"]
resp = self.tranceive(json.dumps(cmd))
self.assertEqual(resp["nonce"], original_nonce)
def test_version(self):
cmd = json.dumps(self.complete_msg({"type":"VERSION"}))
resp = self.tranceive(cmd)
self.assertEqual(resp["version"], "LOCAL_BUILD")
def test_get_certificate_cancel(self):
print "PRESS CANCEL IN THE DIALOG"
cmd = json.dumps(self.complete_msg({"type":"CERT"}))
resp = self.tranceive(cmd)
if __name__ == '__main__':
if len(sys.argv) > 1:
exe = sys.argv[1]
# remove argument so that unittest.main() would work as expected
sys.argv = [sys.argv[0]]
else:
print "usage: stateless-test.py <path to executable>"
sys.exit(1)
# run tests
unittest.main()
|
|
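The datagram the tests exchange is the Chrome native-messaging frame referenced in the file header: a 4-byte native-endian length prefix followed by a JSON payload. Minimal Python 3 helpers for the same wire format, shown as a sketch rather than part of the original suite:

import json
import struct

def write_message(stream, obj):
    """Frame a JSON-serializable object onto a binary stream."""
    payload = json.dumps(obj).encode('utf-8')
    stream.write(struct.pack('=I', len(payload)))
    stream.write(payload)
    stream.flush()

def read_message(stream):
    """Read one length-prefixed JSON message from a binary stream."""
    header = stream.read(4)
    if len(header) < 4:
        raise EOFError('stream closed mid-frame')
    (length,) = struct.unpack('=I', header)
    return json.loads(stream.read(length).decode('utf-8'))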
333ae389c14593421d01bd0c8f6904d1688dfa7e
|
ixxy_admin_utils/dashboard_modules.py
|
ixxy_admin_utils/dashboard_modules.py
|
from admin_tools.dashboard.modules import LinkList
from django.core.urlresolvers import reverse
from linkcheck.views import get_status_message
class PermCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_perms = kwargs.pop('required_perms', [])
super(PermCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(PermCheckingLinkList, self).init_with_context(context)
if self.required_perms:
if not context['request'].user.has_perms(self.required_perms):
self.children = None
self.pre_content = None
self.post_content = None
class GroupCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_group = kwargs.pop('required_group', None)
super(GroupCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(GroupCheckingLinkList, self).init_with_context(context)
if self.required_group:
if not context['request'].user.groups.filter(name=self.required_group):
self.children = None
self.pre_content = None
self.post_content = None
linkcheck_perm_checking_dashboard_module = PermCheckingLinkList(
title="Linkchecker",
pre_content=get_status_message,
children=(
{'title': 'Valid links', 'url': reverse('linkcheck_report') + '?filters=show_valid'},
{'title': 'Broken links', 'url': reverse('linkcheck_report')},
{'title': 'Untested links', 'url': reverse('linkcheck_report') + '?filters=show_unchecked'},
{'title': 'Ignored links', 'url': reverse('linkcheck_report') + '?filters=ignored'},
),
required_perms=['linkcheck.can_change_link'],
)
|
Add our custom dashboard modules
|
Add our custom dashboard modules
|
Python
|
mit
|
DjangoAdminHackers/ixxy-admin-utils,DjangoAdminHackers/ixxy-admin-utils
|
Add our custom dashboard modules
|
from admin_tools.dashboard.modules import LinkList
from django.core.urlresolvers import reverse
from linkcheck.views import get_status_message
class PermCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_perms = kwargs.pop('required_perms', [])
super(PermCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(PermCheckingLinkList, self).init_with_context(context)
if self.required_perms:
if not context['request'].user.has_perms(self.required_perms):
self.children = None
self.pre_content = None
self.post_content = None
class GroupCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_group = kwargs.pop('required_group', None)
super(GroupCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(GroupCheckingLinkList, self).init_with_context(context)
if self.required_group:
if not context['request'].user.groups.filter(name=self.required_group):
self.children = None
self.pre_content = None
self.post_content = None
linkcheck_perm_checking_dashboard_module = PermCheckingLinkList(
title="Linkchecker",
pre_content=get_status_message,
children=(
{'title': 'Valid links', 'url': reverse('linkcheck_report') + '?filters=show_valid'},
{'title': 'Broken links', 'url': reverse('linkcheck_report')},
{'title': 'Untested links', 'url': reverse('linkcheck_report') + '?filters=show_unchecked'},
{'title': 'Ignored links', 'url': reverse('linkcheck_report') + '?filters=ignored'},
),
required_perms=['linkcheck.can_change_link'],
)
|
<commit_before><commit_msg>Add our custom dashboard modules<commit_after>
|
from admin_tools.dashboard.modules import LinkList
from django.core.urlresolvers import reverse
from linkcheck.views import get_status_message
class PermCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_perms = kwargs.pop('required_perms', [])
super(PermCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(PermCheckingLinkList, self).init_with_context(context)
if self.required_perms:
if not context['request'].user.has_perms(self.required_perms):
self.children = None
self.pre_content = None
self.post_content = None
class GroupCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_group = kwargs.pop('required_group', None)
super(GroupCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(GroupCheckingLinkList, self).init_with_context(context)
if self.required_group:
if not context['request'].user.groups.filter(name=self.required_group):
self.children = None
self.pre_content = None
self.post_content = None
linkcheck_perm_checking_dashboard_module = PermCheckingLinkList(
title="Linkchecker",
pre_content=get_status_message,
children=(
{'title': 'Valid links', 'url': reverse('linkcheck_report') + '?filters=show_valid'},
{'title': 'Broken links', 'url': reverse('linkcheck_report')},
{'title': 'Untested links', 'url': reverse('linkcheck_report') + '?filters=show_unchecked'},
{'title': 'Ignored links', 'url': reverse('linkcheck_report') + '?filters=ignored'},
),
required_perms=['linkcheck.can_change_link'],
)
|
Add our custom dashboard modulesfrom admin_tools.dashboard.modules import LinkList
from django.core.urlresolvers import reverse
from linkcheck.views import get_status_message
class PermCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_perms = kwargs.pop('required_perms', [])
super(PermCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(PermCheckingLinkList, self).init_with_context(context)
if self.required_perms:
if not context['request'].user.has_perms(self.required_perms):
self.children = None
self.pre_content = None
self.post_content = None
class GroupCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_group = kwargs.pop('required_group', None)
super(GroupCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(GroupCheckingLinkList, self).init_with_context(context)
if self.required_group:
if not context['request'].user.groups.filter(name=self.required_group):
self.children = None
self.pre_content = None
self.post_content = None
linkcheck_perm_checking_dashboard_module = PermCheckingLinkList(
title="Linkchecker",
pre_content=get_status_message,
children=(
{'title': 'Valid links', 'url': reverse('linkcheck_report') + '?filters=show_valid'},
{'title': 'Broken links', 'url': reverse('linkcheck_report')},
{'title': 'Untested links', 'url': reverse('linkcheck_report') + '?filters=show_unchecked'},
{'title': 'Ignored links', 'url': reverse('linkcheck_report') + '?filters=ignored'},
),
required_perms=['linkcheck.can_change_link'],
)
|
<commit_before><commit_msg>Add our custom dashboard modules<commit_after>from admin_tools.dashboard.modules import LinkList
from django.core.urlresolvers import reverse
from linkcheck.views import get_status_message
class PermCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_perms = kwargs.pop('required_perms', [])
super(PermCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(PermCheckingLinkList, self).init_with_context(context)
if self.required_perms:
if not context['request'].user.has_perms(self.required_perms):
self.children = None
self.pre_content = None
self.post_content = None
class GroupCheckingLinkList(LinkList):
def __init__(self, title=None, **kwargs):
self.required_group = kwargs.pop('required_group', None)
super(GroupCheckingLinkList, self).__init__(title, **kwargs)
def init_with_context(self, context):
super(GroupCheckingLinkList, self).init_with_context(context)
if self.required_group:
if not context['request'].user.groups.filter(name=self.required_group):
self.children = None
self.pre_content = None
self.post_content = None
linkcheck_perm_checking_dashboard_module = PermCheckingLinkList(
title="Linkchecker",
pre_content=get_status_message,
children=(
{'title': 'Valid links', 'url': reverse('linkcheck_report') + '?filters=show_valid'},
{'title': 'Broken links', 'url': reverse('linkcheck_report')},
{'title': 'Untested links', 'url': reverse('linkcheck_report') + '?filters=show_unchecked'},
{'title': 'Ignored links', 'url': reverse('linkcheck_report') + '?filters=ignored'},
),
required_perms=['linkcheck.can_change_link'],
)
|
|
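Using the module means appending it to a dashboard, per the usual django-admin-tools pattern; the Dashboard subclass below is illustrative and not part of the commit:

from admin_tools.dashboard import Dashboard

from ixxy_admin_utils.dashboard_modules import (
    linkcheck_perm_checking_dashboard_module,
)

class CustomIndexDashboard(Dashboard):
    def init_with_context(self, context):
        # The module blanks itself out for users without
        # linkcheck.can_change_link, so it is safe to add unconditionally.
        self.children.append(linkcheck_perm_checking_dashboard_module)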
62ec4eca706bbb521d02ec597c0c18a949b37e52
|
sysctl/tests/test_sysctl_setvalue.py
|
sysctl/tests/test_sysctl_setvalue.py
|
import os
import sysctl
from sysctl.tests import SysctlTestBase
class TestSysctlANum(SysctlTestBase):
def test_sysctl_setvalue(self):
dummy = sysctl.filter('kern.dummy')[0]
try:
self.command("/sbin/sysctl kern.dummy=0")
except:
if os.getuid() == 0:
raise
try:
dummy.value = 1
except TypeError:
if os.getuid() == 0:
raise
if os.getuid() == 0:
value = int(self.command("/sbin/sysctl -n kern.dummy"))
if value != 1:
raise ValueError("Failed to set kern.dummy")
|
Add test to set a sysctl value
|
Add test to set a sysctl value
|
Python
|
bsd-2-clause
|
williambr/py-sysctl,williambr/py-sysctl
|
Add test to set a sysctl value
|
import os
import sysctl
from sysctl.tests import SysctlTestBase
class TestSysctlANum(SysctlTestBase):
def test_sysctl_setvalue(self):
dummy = sysctl.filter('kern.dummy')[0]
try:
self.command("/sbin/sysctl kern.dummy=0")
except:
if os.getuid() == 0:
raise
try:
dummy.value = 1
except TypeError:
if os.getuid() == 0:
raise
if os.getuid() == 0:
value = int(self.command("/sbin/sysctl -n kern.dummy"))
if value != 1:
raise ValueError("Failed to set kern.dummy")
|
<commit_before><commit_msg>Add test to set a sysctl value<commit_after>
|
import os
import sysctl
from sysctl.tests import SysctlTestBase
class TestSysctlANum(SysctlTestBase):
def test_sysctl_setvalue(self):
dummy = sysctl.filter('kern.dummy')[0]
try:
self.command("/sbin/sysctl kern.dummy=0")
except:
if os.getuid() == 0:
raise
try:
dummy.value = 1
except TypeError:
if os.getuid() == 0:
raise
if os.getuid() == 0:
value = int(self.command("/sbin/sysctl -n kern.dummy"))
if value != 1:
raise ValueError("Failed to set kern.dummy")
|
Add test to set a sysctl valueimport os
import sysctl
from sysctl.tests import SysctlTestBase
class TestSysctlANum(SysctlTestBase):
def test_sysctl_setvalue(self):
dummy = sysctl.filter('kern.dummy')[0]
try:
self.command("/sbin/sysctl kern.dummy=0")
except:
if os.getuid() == 0:
raise
try:
dummy.value = 1
except TypeError:
if os.getuid() == 0:
raise
if os.getuid() == 0:
value = int(self.command("/sbin/sysctl -n kern.dummy"))
if value != 1:
raise ValueError("Failed to set kern.dummy")
|
<commit_before><commit_msg>Add test to set a sysctl value<commit_after>import os
import sysctl
from sysctl.tests import SysctlTestBase
class TestSysctlANum(SysctlTestBase):
def test_sysctl_setvalue(self):
dummy = sysctl.filter('kern.dummy')[0]
try:
self.command("/sbin/sysctl kern.dummy=0")
except:
if os.getuid() == 0:
raise
try:
dummy.value = 1
except TypeError:
if os.getuid() == 0:
raise
if os.getuid() == 0:
value = int(self.command("/sbin/sysctl -n kern.dummy"))
if value != 1:
raise ValueError("Failed to set kern.dummy")
|
|
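Reading a value back through the same py-sysctl binding is the symmetric half of the test above; the .name and .value attributes are assumed from the filter()/value usage in the record:

import sysctl

def show(name='kern.ostype'):
    node = sysctl.filter(name)[0]
    print('%s = %s' % (node.name, node.value))

show()  # e.g. "kern.ostype = FreeBSD" on a FreeBSD host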
490d5110b46afe5210bf2da4489854561e1bb259
|
bsd2/vagrant-ansible/provisioning/getrstudio.py
|
bsd2/vagrant-ansible/provisioning/getrstudio.py
|
#!/usr/bin/python
#
# RStudio does not provide a "-latest" download link so we deduce it from
# their S3 bucket listing and save the download to /tmp/rstudio-latest.deb.
#
# Test this script by running:
# wget -O /tmp/somefile.xml http://download1.rstudio.org
# python thisscript.py /tmp/somefile.xml
import sys
import urllib
import xml.etree.ElementTree as ET
# There's also download2
RSTUDIO_MIRROR = 'http://download1.rstudio.org'
DEST = '/tmp/rstudio-latest.deb'
# Debugging; save output from RSTUDIO_MIRROR to a file then specify the
# path to the file as first argument
DEBUG = len(sys.argv) > 1
if DEBUG:
f = open(sys.argv[1])
else:
f = urllib.urlopen(RSTUDIO_MIRROR)
# S3 bucket; http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
root = ET.parse(f).getroot()
# {http://s3.amazonaws.com/doc/2006-03-01/}
namespace = root.tag.split('ListBucketResult')[0]
latest = ''
for child in root:
try: filename = child.find(namespace + 'Key').text
except: continue
# Filename comparison relies on rstudio's stable naming convention
if filename.endswith('amd64.deb') and filename > latest: latest = filename
if DEBUG:
print RSTUDIO_MIRROR + '/' + latest
else:
dl = urllib.urlretrieve(RSTUDIO_MIRROR + '/' + latest, DEST)
|
Add a utility to retrieve the latest version of RStudio.
|
Add a utility to retrieve the latest version of RStudio.
|
Python
|
apache-2.0
|
dlab-berkeley/collaboratool-archive,dlab-berkeley/collaboratool-archive,dlab-berkeley/collaboratool-archive,dlab-berkeley/collaboratool-archive,dlab-berkeley/collaboratool-archive
|
Add a utility to retrieve the latest version of RStudio.
|
#!/usr/bin/python
#
# RStudio does not provide a "-latest" download link so we deduce it from
# their S3 bucket listing and save the download to /tmp/rstudio-latest.deb.
#
# Test this script by running:
# wget -O /tmp/somefile.xml http://download1.rstudio.org
# python thisscript.py /tmp/somefile.xml
import sys
import urllib
import xml.etree.ElementTree as ET
# There's also download2
RSTUDIO_MIRROR = 'http://download1.rstudio.org'
DEST = '/tmp/rstudio-latest.deb'
# Debugging; save output from RSTUDIO_MIRROR to a file then specify the
# path to the file as first argument
DEBUG = len(sys.argv) > 1
if DEBUG:
f = open(sys.argv[1])
else:
f = urllib.urlopen(RSTUDIO_MIRROR)
# S3 bucket; http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
root = ET.parse(f).getroot()
# {http://s3.amazonaws.com/doc/2006-03-01/}
namespace = root.tag.split('ListBucketResult')[0]
latest = ''
for child in root:
try: filename = child.find(namespace + 'Key').text
except: continue
# Filename comparison relies on rstudio's stable naming convention
if filename.endswith('amd64.deb') and filename > latest: latest = filename
if DEBUG:
print RSTUDIO_MIRROR + '/' + latest
else:
dl = urllib.urlretrieve(RSTUDIO_MIRROR + '/' + latest, DEST)
|
<commit_before><commit_msg>Add a utility to retrieve the latest version of RStudio.<commit_after>
|
#!/usr/bin/python
#
# RStudio does not provide a "-latest" download link so we deduce it from
# their S3 bucket listing and save the download to /tmp/rstudio-latest.deb.
#
# Test this script by running:
# wget -O /tmp/somefile.xml http://download1.rstudio.org
# python thisscript.py /tmp/somefile.xml
import sys
import urllib
import xml.etree.ElementTree as ET
# There's also download2
RSTUDIO_MIRROR = 'http://download1.rstudio.org'
DEST = '/tmp/rstudio-latest.deb'
# Debugging; save output from RSTUDIO_MIRROR to a file then specify the
# path to the file as first argument
DEBUG = len(sys.argv) > 1
if DEBUG:
f = open(sys.argv[1])
else:
f = urllib.urlopen(RSTUDIO_MIRROR)
# S3 bucket; http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
root = ET.parse(f).getroot()
# {http://s3.amazonaws.com/doc/2006-03-01/}
namespace = root.tag.split('ListBucketResult')[0]
latest = ''
for child in root:
try: filename = child.find(namespace + 'Key').text
except: continue
# Filename comparison relies on rstudio's stable naming convention
if filename.endswith('amd64.deb') and filename > latest: latest = filename
if DEBUG:
print RSTUDIO_MIRROR + '/' + latest
else:
dl = urllib.urlretrieve(RSTUDIO_MIRROR + '/' + latest, DEST)
|
Add a utility to retrieve the latest version of RStudio.#!/usr/bin/python
#
# RStudio does not provide a "-latest" download link so we deduce it from
# their S3 bucket listing and save the download to /tmp/rstudio-latest.deb.
#
# Test this script by running:
# wget -O /tmp/somefile.xml http://download1.rstudio.org
# python thisscript.py /tmp/somefile.xml
import sys
import urllib
import xml.etree.ElementTree as ET
# There's also download2
RSTUDIO_MIRROR = 'http://download1.rstudio.org'
DEST = '/tmp/rstudio-latest.deb'
# Debugging; save output from RSTUDIO_MIRROR to a file then specify the
# path to the file as first argument
DEBUG = len(sys.argv) > 1
if DEBUG:
f = open(sys.argv[1])
else:
f = urllib.urlopen(RSTUDIO_MIRROR)
# S3 bucket; http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
root = ET.parse(f).getroot()
# {http://s3.amazonaws.com/doc/2006-03-01/}
namespace = root.tag.split('ListBucketResult')[0]
latest = ''
for child in root:
try: filename = child.find(namespace + 'Key').text
except: continue
# Filename comparison relies on rstudio's stable naming convention
if filename.endswith('amd64.deb') and filename > latest: latest = filename
if DEBUG:
print RSTUDIO_MIRROR + '/' + latest
else:
dl = urllib.urlretrieve(RSTUDIO_MIRROR + '/' + latest, DEST)
|
<commit_before><commit_msg>Add a utility to retrieve the latest version of RStudio.<commit_after>#!/usr/bin/python
#
# RStudio does not provide a "-latest" download link so we deduce it from
# their S3 bucket listing and save the download to /tmp/rstudio-latest.deb.
#
# Test this script by running:
# wget -O /tmp/somefile.xml http://download1.rstudio.org
# python thisscript.py /tmp/somefile.xml
import sys
import urllib
import xml.etree.ElementTree as ET
# There's also download2
RSTUDIO_MIRROR = 'http://download1.rstudio.org'
DEST = '/tmp/rstudio-latest.deb'
# Debugging; save output from RSTUDIO_MIRROR to a file then specify the
# path to the file as first argument
DEBUG = len(sys.argv) > 1
if DEBUG:
f = open(sys.argv[1])
else:
f = urllib.urlopen(RSTUDIO_MIRROR)
# S3 bucket; http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
root = ET.parse(f).getroot()
# {http://s3.amazonaws.com/doc/2006-03-01/}
namespace = root.tag.split('ListBucketResult')[0]
latest = ''
for child in root:
try: filename = child.find(namespace + 'Key').text
except: continue
# Filename comparison relies on rstudio's stable naming convention
if filename.endswith('amd64.deb') and filename > latest: latest = filename
if DEBUG:
print RSTUDIO_MIRROR + '/' + latest
else:
dl = urllib.urlretrieve(RSTUDIO_MIRROR + '/' + latest, DEST)
|
|
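A Python 3 re-sketch of the same S3 bucket walk, with urllib.request standing in for the Python 2 urllib calls; max() leans on the same stable naming convention the original comparison does:

import urllib.request
import xml.etree.ElementTree as ET

MIRROR = 'http://download1.rstudio.org'

def latest_deb_url():
    with urllib.request.urlopen(MIRROR) as fh:
        root = ET.parse(fh).getroot()
    # Strip the namespace off the root tag, as the original script does.
    ns = root.tag.split('ListBucketResult')[0]
    keys = (k.text for k in root.iter(ns + 'Key'))
    debs = [k for k in keys if k and k.endswith('amd64.deb')]
    return MIRROR + '/' + max(debs) if debs else None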
30e33acc50725ea5f8bb4a094e60dbeadaa6d1f8
|
CodeFights/cyclicName.py
|
CodeFights/cyclicName.py
|
#!/usr/local/bin/python
# Code Fights Cyclic Name Problem
from itertools import cycle
def cyclicName(name, n):
gen = cycle(name)
res = [next(gen) for _ in range(n)]
return ''.join(res)
def main():
tests = [
["nicecoder", 15, "nicecoderniceco"],
["codefights", 50, "codefightscodefightscodefightscodefightscode"
"fights"],
["test", 4, "test"],
["q", 8, "qqqqqqqq"],
["ninja", 15, "ninjaninjaninja"]
]
for t in tests:
res = cyclicName(t[0], t[1])
if t[2] == res:
print("PASSED: cyclicName({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: cyclicName({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
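# An equivalent, allocation-light variant using modular indexing (an
# illustrative sketch, not part of the submitted solution):
# def cyclicNameMod(name, n):
#     return ''.join(name[i % len(name)] for i in range(n))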
|
Solve Code Fights cyclic name problem
|
Solve Code Fights cyclic name problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights cyclic name problem
|
#!/usr/local/bin/python
# Code Fights Cyclic Name Problem
from itertools import cycle
def cyclicName(name, n):
gen = cycle(name)
res = [next(gen) for _ in range(n)]
return ''.join(res)
def main():
tests = [
["nicecoder", 15, "nicecoderniceco"],
["codefights", 50, "codefightscodefightscodefightscodefightscode"
"fights"],
["test", 4, "test"],
["q", 8, "qqqqqqqq"],
["ninja", 15, "ninjaninjaninja"]
]
for t in tests:
res = cyclicName(t[0], t[1])
if t[2] == res:
print("PASSED: cyclicName({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: cyclicName({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights cyclic name problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Cyclic Name Problem
from itertools import cycle
def cyclicName(name, n):
gen = cycle(name)
res = [next(gen) for _ in range(n)]
return ''.join(res)
def main():
tests = [
["nicecoder", 15, "nicecoderniceco"],
["codefights", 50, "codefightscodefightscodefightscodefightscode"
"fights"],
["test", 4, "test"],
["q", 8, "qqqqqqqq"],
["ninja", 15, "ninjaninjaninja"]
]
for t in tests:
res = cyclicName(t[0], t[1])
if t[2] == res:
print("PASSED: cyclicName({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: cyclicName({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
Solve Code Fights cyclic name problem#!/usr/local/bin/python
# Code Fights Cyclic Name Problem
from itertools import cycle
def cyclicName(name, n):
gen = cycle(name)
res = [next(gen) for _ in range(n)]
return ''.join(res)
def main():
tests = [
["nicecoder", 15, "nicecoderniceco"],
["codefights", 50, "codefightscodefightscodefightscodefightscode"
"fights"],
["test", 4, "test"],
["q", 8, "qqqqqqqq"],
["ninja", 15, "ninjaninjaninja"]
]
for t in tests:
res = cyclicName(t[0], t[1])
if t[2] == res:
print("PASSED: cyclicName({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: cyclicName({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights cyclic name problem<commit_after>#!/usr/local/bin/python
# Code Fights Cyclic Name Problem
from itertools import cycle
def cyclicName(name, n):
gen = cycle(name)
res = [next(gen) for _ in range(n)]
return ''.join(res)
def main():
tests = [
["nicecoder", 15, "nicecoderniceco"],
["codefights", 50, "codefightscodefightscodefightscodefightscode"
"fights"],
["test", 4, "test"],
["q", 8, "qqqqqqqq"],
["ninja", 15, "ninjaninjaninja"]
]
for t in tests:
res = cyclicName(t[0], t[1])
if t[2] == res:
print("PASSED: cyclicName({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: cyclicName({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
|
92c8b146783c807b143a60419efd88f2da11d065
|
gaphor/C4Model/tests/test_propertypages.py
|
gaphor/C4Model/tests/test_propertypages.py
|
from gaphor import C4Model
from gaphor.C4Model.propertypages import DescriptionPropertyPage, TechnologyPropertyPage
from gaphor.diagram.tests.fixtures import find
def test_description_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = DescriptionPropertyPage(subject)
widget = property_page.construct()
description = find(widget, "description")
description.get_buffer().set_text("test")
assert subject.description == "test"
def test_technology_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = TechnologyPropertyPage(subject)
widget = property_page.construct()
technology = find(widget, "technology")
technology.set_text("test")
assert subject.technology == "test"
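# Hypothetical invocation, assuming the project's pytest configuration
# provides the element_factory fixture:
#   pytest gaphor/C4Model/tests/test_propertypages.py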
|
Add tests for C4 model property pages
|
Add tests for C4 model property pages
|
Python
|
lgpl-2.1
|
amolenaar/gaphor,amolenaar/gaphor
|
Add tests for C4 model property pages
|
from gaphor import C4Model
from gaphor.C4Model.propertypages import DescriptionPropertyPage, TechnologyPropertyPage
from gaphor.diagram.tests.fixtures import find
def test_description_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = DescriptionPropertyPage(subject)
widget = property_page.construct()
description = find(widget, "description")
description.get_buffer().set_text("test")
assert subject.description == "test"
def test_technology_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = TechnologyPropertyPage(subject)
widget = property_page.construct()
technology = find(widget, "technology")
technology.set_text("test")
assert subject.technology == "test"
|
<commit_before><commit_msg>Add tests for C4 model property pages<commit_after>
|
from gaphor import C4Model
from gaphor.C4Model.propertypages import DescriptionPropertyPage, TechnologyPropertyPage
from gaphor.diagram.tests.fixtures import find
def test_description_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = DescriptionPropertyPage(subject)
widget = property_page.construct()
description = find(widget, "description")
description.get_buffer().set_text("test")
assert subject.description == "test"
def test_technology_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = TechnologyPropertyPage(subject)
widget = property_page.construct()
technology = find(widget, "technology")
technology.set_text("test")
assert subject.technology == "test"
|
Add tests for C4 model property pagesfrom gaphor import C4Model
from gaphor.C4Model.propertypages import DescriptionPropertyPage, TechnologyPropertyPage
from gaphor.diagram.tests.fixtures import find
def test_description_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = DescriptionPropertyPage(subject)
widget = property_page.construct()
description = find(widget, "description")
description.get_buffer().set_text("test")
assert subject.description == "test"
def test_technology_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = TechnologyPropertyPage(subject)
widget = property_page.construct()
technology = find(widget, "technology")
technology.set_text("test")
assert subject.technology == "test"
|
<commit_before><commit_msg>Add tests for C4 model property pages<commit_after>from gaphor import C4Model
from gaphor.C4Model.propertypages import DescriptionPropertyPage, TechnologyPropertyPage
from gaphor.diagram.tests.fixtures import find
def test_description_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = DescriptionPropertyPage(subject)
widget = property_page.construct()
description = find(widget, "description")
description.get_buffer().set_text("test")
assert subject.description == "test"
def test_technology_property_page(element_factory):
subject = element_factory.create(C4Model.c4model.C4Container)
property_page = TechnologyPropertyPage(subject)
widget = property_page.construct()
technology = find(widget, "technology")
technology.set_text("test")
assert subject.technology == "test"
|
|
1c4429759a3e89ed952b1a025b1470a9e187537f
|
tests/test_spatial_reference.py
|
tests/test_spatial_reference.py
|
# -*- coding: utf-8 -*-
import rasterio
import pytest
from math import pi
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import EPSG_WEB_MERCATOR
from gdal2mbtiles.gdal import SpatialReference
# SEMI_MAJOR is a constant referring to the WGS84 Semi Major Axis.
SEMI_MAJOR = 6378137.0
# Note: web-Mercator = pseudo-Mercator = EPSG 3857
# The extents of the web-Mercator are constants.
# Since the projection is formed from a sphere the extents of the projection
# form a square.
# For the values of the extents refer to:
# OpenLayers lib: http://docs.openlayers.org/library/spherical_mercator.html
EPSG3857_EXTENT = pi * SEMI_MAJOR
EPSG3857_EXTENTS = array([[-EPSG3857_EXTENT]*2, [EPSG3857_EXTENT]*2])
epsg_3857_raster_path = 'tests/web_mercator_3857.tif'
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
ds_3857 = rasterio.open(epsg_3857_raster_path)
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4(ds_3857.crs.to_string())
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
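# Quick sanity sketch for the constant (not part of the test suite):
# pi * SEMI_MAJOR ~= 20037508.342789, the familiar web-Mercator half-extent.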
|
Add test for the EPSG-3857 spatial reference.
|
Add test for the EPSG-3857 spatial reference.
|
Python
|
apache-2.0
|
ecometrica/gdal2mbtiles
|
Add test for the EPSG-3857 spatial reference.
|
# -*- coding: utf-8 -*-
import rasterio
import pytest
from math import pi
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import EPSG_WEB_MERCATOR
from gdal2mbtiles.gdal import SpatialReference
# SEMI_MAJOR is a constant referring to the WGS84 Semi Major Axis.
SEMI_MAJOR = 6378137.0
# Note: web-Mercator = pseudo-Mercator = EPSG 3857
# The extents of the web-Mercator are constants.
# Since the projection is formed from a sphere the extents of the projection
# form a square.
# For the values of the extents refer to:
# OpenLayers lib: http://docs.openlayers.org/library/spherical_mercator.html
EPSG3857_EXTENT = pi * SEMI_MAJOR
EPSG3857_EXTENTS = array([[-EPSG3857_EXTENT]*2, [EPSG3857_EXTENT]*2])
epsg_3857_raster_path = 'tests/web_mercator_3857.tif'
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
ds_3857 = rasterio.open(epsg_3857_raster_path)
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4(ds_3857.crs.to_string())
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
|
<commit_before><commit_msg>Add test for the EPSG-3857 spatial reference.<commit_after>
|
# -*- coding: utf-8 -*-
import rasterio
import pytest
from math import pi
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import EPSG_WEB_MERCATOR
from gdal2mbtiles.gdal import SpatialReference
# SEMI_MAJOR is a constant referring to the WGS84 Semi Major Axis.
SEMI_MAJOR = 6378137.0
# Note: web-Mercator = pseudo-Mercator = EPSG 3857
# The extents of the web-Mercator are constants.
# Since the projection is formed from a sphere the extents of the projection
# form a square.
# For the values of the extents refer to:
# OpenLayers lib: http://docs.openlayers.org/library/spherical_mercator.html
EPSG3857_EXTENT = pi * SEMI_MAJOR
EPSG3857_EXTENTS = array([[-EPSG3857_EXTENT]*2, [EPSG3857_EXTENT]*2])
epsg_3857_raster_path = 'tests/web_mercator_3857.tif'
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
ds_3857 = rasterio.open(epsg_3857_raster_path)
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4(ds_3857.crs.to_string())
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
|
Add test for the EPSG-3857 spatial reference.# -*- coding: utf-8 -*-
import rasterio
import pytest
from math import pi
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import EPSG_WEB_MERCATOR
from gdal2mbtiles.gdal import SpatialReference
# SEMI_MAJOR is a constant referring to the WGS84 Semi Major Axis.
SEMI_MAJOR = 6378137.0
# Note: web-Mercator = pseudo-Mercator = EPSG 3857
# The extents of the web-Mercator are constants.
# Since the projection is formed from a sphere the extents of the projection
# form a square.
# For the values of the extents refer to:
# OpenLayers lib: http://docs.openlayers.org/library/spherical_mercator.html
EPSG3857_EXTENT = pi * SEMI_MAJOR
EPSG3857_EXTENTS = array([[-EPSG3857_EXTENT]*2, [EPSG3857_EXTENT]*2])
epsg_3857_raster_path = 'tests/web_mercator_3857.tif'
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
ds_3857 = rasterio.open(epsg_3857_raster_path)
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4(ds_3857.crs.to_string())
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
|
<commit_before><commit_msg>Add test for the EPSG-3857 spatial reference.<commit_after># -*- coding: utf-8 -*-
import rasterio
import pytest
from math import pi
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import EPSG_WEB_MERCATOR
from gdal2mbtiles.gdal import SpatialReference
# SEMI_MAJOR is a constant referring to the WGS84 Semi Major Axis.
SEMI_MAJOR = 6378137.0
# Note: web-Mercator = pseudo-Mercator = EPSG 3857
# The extents of the web-Mercator are constants.
# Since the projection is formed from a sphere the extents of the projection
# form a square.
# For the values of the extents refer to:
# OpenLayers lib: http://docs.openlayers.org/library/spherical_mercator.html
EPSG3857_EXTENT = pi * SEMI_MAJOR
EPSG3857_EXTENTS = array([[-EPSG3857_EXTENT]*2, [EPSG3857_EXTENT]*2])
epsg_3857_raster_path = 'tests/web_mercator_3857.tif'
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
ds_3857 = rasterio.open(epsg_3857_raster_path)
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4(ds_3857.crs.to_string())
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
|
|
0d92340f2b1cab932f14e376dc776a7c11e3e42f
|
tests/batch_fetch/test_generic_relationship.py
|
tests/batch_fetch/test_generic_relationship.py
|
from __future__ import unicode_literals
import sqlalchemy as sa
from tests import TestCase
from sqlalchemy_utils import batch_fetch, generic_relationship
class TestBatchFetchGenericRelationship(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
class Event(self.Base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
object = generic_relationship(object_type, object_id)
self.Building = Building
self.User = User
self.Event = Event
def test_batch_fetch(self):
user = self.User()
self.session.add(user)
self.session.commit()
event = self.Event(object=user)
self.session.add(event)
self.session.commit()
events = self.session.query(self.Event).all()
batch_fetch(events, 'object')
query_count = self.connection.query_count
events[0].object
assert self.connection.query_count == query_count
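        # A hypothetical follow-up check (not in the original test): thanks to
        # the session identity map, the prefetched object should resolve to
        # the very user instance created above, e.g.
        #   assert events[0].object is user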
|
Add tests for generic relationship batch fetching
|
Add tests for generic relationship batch fetching
|
Python
|
bsd-3-clause
|
konstantinoskostis/sqlalchemy-utils,tonyseek/sqlalchemy-utils,joshfriend/sqlalchemy-utils,cheungpat/sqlalchemy-utils,tonyseek/sqlalchemy-utils,joshfriend/sqlalchemy-utils,spoqa/sqlalchemy-utils,rmoorman/sqlalchemy-utils,marrybird/sqlalchemy-utils,JackWink/sqlalchemy-utils
|
Add tests for generic relationship batch fetching
|
from __future__ import unicode_literals
import sqlalchemy as sa
from tests import TestCase
from sqlalchemy_utils import batch_fetch, generic_relationship
class TestBatchFetchGenericRelationship(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
class Event(self.Base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
object = generic_relationship(object_type, object_id)
self.Building = Building
self.User = User
self.Event = Event
def test_batch_fetch(self):
user = self.User()
self.session.add(user)
self.session.commit()
event = self.Event(object=user)
self.session.add(event)
self.session.commit()
events = self.session.query(self.Event).all()
batch_fetch(events, 'object')
query_count = self.connection.query_count
events[0].object
assert self.connection.query_count == query_count
|
<commit_before><commit_msg>Add tests for generic relationship batch fetching<commit_after>
|
from __future__ import unicode_literals
import sqlalchemy as sa
from tests import TestCase
from sqlalchemy_utils import batch_fetch, generic_relationship
class TestBatchFetchGenericRelationship(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
class Event(self.Base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
object = generic_relationship(object_type, object_id)
self.Building = Building
self.User = User
self.Event = Event
def test_batch_fetch(self):
user = self.User()
self.session.add(user)
self.session.commit()
event = self.Event(object=user)
self.session.add(event)
self.session.commit()
events = self.session.query(self.Event).all()
batch_fetch(events, 'object')
query_count = self.connection.query_count
events[0].object
assert self.connection.query_count == query_count
|
Add tests for generic relationship batch fetchingfrom __future__ import unicode_literals
import sqlalchemy as sa
from tests import TestCase
from sqlalchemy_utils import batch_fetch, generic_relationship
class TestBatchFetchGenericRelationship(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
class Event(self.Base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
object = generic_relationship(object_type, object_id)
self.Building = Building
self.User = User
self.Event = Event
def test_batch_fetch(self):
user = self.User()
self.session.add(user)
self.session.commit()
event = self.Event(object=user)
self.session.add(event)
self.session.commit()
events = self.session.query(self.Event).all()
batch_fetch(events, 'object')
query_count = self.connection.query_count
events[0].object
assert self.connection.query_count == query_count
|
<commit_before><commit_msg>Add tests for generic relationship batch fetching<commit_after>from __future__ import unicode_literals
import sqlalchemy as sa
from tests import TestCase
from sqlalchemy_utils import batch_fetch, generic_relationship
class TestBatchFetchGenericRelationship(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
class Event(self.Base):
__tablename__ = 'event'
id = sa.Column(sa.Integer, primary_key=True)
object_type = sa.Column(sa.Unicode(255))
object_id = sa.Column(sa.Integer, nullable=False)
object = generic_relationship(object_type, object_id)
self.Building = Building
self.User = User
self.Event = Event
def test_batch_fetch(self):
user = self.User()
self.session.add(user)
self.session.commit()
event = self.Event(object=user)
self.session.add(event)
self.session.commit()
events = self.session.query(self.Event).all()
batch_fetch(events, 'object')
query_count = self.connection.query_count
events[0].object
assert self.connection.query_count == query_count
|
|
fb3766204cbb25ccf8ee73ab4f480ba34251542c
|
nagios/check_hads_ingest.py
|
nagios/check_hads_ingest.py
|
"""
Check how much HADS data we have
"""
import os
import sys
import stat
import iemdb
HADS = iemdb.connect('iem', bypass=True)
hcursor = HADS.cursor()
def check():
hcursor.execute("""SELECT count(*) from current_shef
WHERE valid > now() - '1 hour'::interval""")
row = hcursor.fetchone()
return row[0]
if __name__ == '__main__':
count = check()
if count > 10000:
print 'OK - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(0)
elif count > 5000:
print 'WARNING - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(2)
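# Nagios plugin convention assumed throughout: exit 0 = OK, 1 = WARNING,
# 2 = CRITICAL, and the text after '|' is perfdata (label=value;warn;crit;...).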
|
Add check on HADS ingest
|
Add check on HADS ingest
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add check on HADS ingest
|
"""
Check how much HADS data we have
"""
import os
import sys
import stat
import iemdb
HADS = iemdb.connect('iem', bypass=True)
hcursor = HADS.cursor()
def check():
hcursor.execute("""SELECT count(*) from current_shef
WHERE valid > now() - '1 hour'::interval""")
row = hcursor.fetchone()
return row[0]
if __name__ == '__main__':
count = check()
if count > 10000:
print 'OK - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(0)
elif count > 5000:
print 'WARNING - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(2)
|
<commit_before><commit_msg>Add check on HADS ingest<commit_after>
|
"""
Check how much HADS data we have
"""
import os
import sys
import stat
import iemdb
HADS = iemdb.connect('iem', bypass=True)
hcursor = HADS.cursor()
def check():
hcursor.execute("""SELECT count(*) from current_shef
WHERE valid > now() - '1 hour'::interval""")
row = hcursor.fetchone()
return row[0]
if __name__ == '__main__':
count = check()
if count > 10000:
print 'OK - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(0)
elif count > 5000:
print 'WARNING - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(2)
|
Add check on HADS ingest"""
Check how much HADS data we have
"""
import os
import sys
import stat
import iemdb
HADS = iemdb.connect('iem', bypass=True)
hcursor = HADS.cursor()
def check():
hcursor.execute("""SELECT count(*) from current_shef
WHERE valid > now() - '1 hour'::interval""")
row = hcursor.fetchone()
return row[0]
if __name__ == '__main__':
count = check()
if count > 10000:
print 'OK - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(0)
elif count > 5000:
print 'WARNING - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(2)
|
<commit_before><commit_msg>Add check on HADS ingest<commit_after>"""
Check how much HADS data we have
"""
import os
import sys
import stat
import iemdb
HADS = iemdb.connect('iem', bypass=True)
hcursor = HADS.cursor()
def check():
hcursor.execute("""SELECT count(*) from current_shef
WHERE valid > now() - '1 hour'::interval""")
row = hcursor.fetchone()
return row[0]
if __name__ == '__main__':
count = check()
if count > 10000:
print 'OK - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(0)
elif count > 5000:
print 'WARNING - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - %s count |count=%s;1000;5000;10000' % (count, count)
sys.exit(2)
|
|
a7529057f590f8b5185a77b3ff62889b1357a0e7
|
data/word_embeddings/create_graph_from_corpus.py
|
data/word_embeddings/create_graph_from_corpus.py
|
from __future__ import print_function
import sys
from random import shuffle
DISTANCE = 10
with open(sys.argv[1], 'r') as corpus:
text = corpus.read()
text = text[:1000000]
words_list = list(set(text.split()))
word_to_id = {}
# Write word id mappings
for index, word in enumerate(list(set(words_list))):
word_to_id[word] = index
# Construct graph
g = {}
words = text.strip().split(" ")
lines = [words[i:i+DISTANCE] for i in range(len(words))]
for line in lines:
if len(line) < DISTANCE:
continue
#print("LINE: ", line)
first_word = line[0]
for other_word in line:
if other_word == first_word:
continue
a, b = tuple(sorted([first_word, other_word]))
if (a,b) not in g:
g[(a, b)] = 0
g[(a, b)] += 1
# Output graph to file
f = open("w2v_graph", "w")
n_nodes = len(set(list(words_list)))
n_edges = len(g.items())
print("%d" % n_nodes, file=f)
for word_pair, occ in g.items():
print("%d %d %d" % (word_to_id[word_pair[0]], word_to_id[word_pair[1]], occ), file=f)
# Print stats
print("N_NODES=%d" % n_nodes)
print("N_EDGES=%d" % n_edges)
f.close()
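# Sketch of reading the emitted edge list back in (names illustrative only):
# with open("w2v_graph") as fh:
#     n_nodes = int(fh.readline())
#     edges = [tuple(map(int, line.split())) for line in fh]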
|
Add w2v create graph from corpus
|
Add w2v create graph from corpus
|
Python
|
apache-2.0
|
agnusmaximus/cyclades,agnusmaximus/cyclades,agnusmaximus/cyclades,agnusmaximus/cyclades
|
Add w2v create graph from corpus
|
from __future__ import print_function
import sys
from random import shuffle
DISTANCE = 10
with open(sys.argv[1], 'r') as corpus:
text = corpus.read()
text = text[:1000000]
words_list = list(set(text.split()))
word_to_id = {}
# Write word id mappings
for index, word in enumerate(list(set(words_list))):
word_to_id[word] = index
# Construct graph
g = {}
words = text.strip().split(" ")
lines = [words[i:i+DISTANCE] for i in range(len(words))]
for line in lines:
if len(line) < DISTANCE:
continue
#print("LINE: ", line)
first_word = line[0]
for other_word in line:
if other_word == first_word:
continue
a, b = tuple(sorted([first_word, other_word]))
if (a,b) not in g:
g[(a, b)] = 0
g[(a, b)] += 1
# Output graph to file
f = open("w2v_graph", "w")
n_nodes = len(set(list(words_list)))
n_edges = len(g.items())
print("%d" % n_nodes, file=f)
for word_pair, occ in g.items():
print("%d %d %d" % (word_to_id[word_pair[0]], word_to_id[word_pair[1]], occ), file=f)
# Print stats
print("N_NODES=%d" % n_nodes)
print("N_EDGES=%d" % n_edges)
f.close()
|
<commit_before><commit_msg>Add w2v create graph from corpus<commit_after>
|
from __future__ import print_function
import sys
from random import shuffle
DISTANCE = 10
with open(sys.argv[1], 'r') as corpus:
text = corpus.read()
text = text[:1000000]
words_list = list(set(text.split()))
word_to_id = {}
# Write word id mappings
for index, word in enumerate(list(set(words_list))):
word_to_id[word] = index
# Construct graph
g = {}
words = text.strip().split(" ")
lines = [words[i:i+DISTANCE] for i in range(len(words))]
for line in lines:
if len(line) < DISTANCE:
continue
#print("LINE: ", line)
first_word = line[0]
for other_word in line:
if other_word == first_word:
continue
a, b = tuple(sorted([first_word, other_word]))
if (a,b) not in g:
g[(a, b)] = 0
g[(a, b)] += 1
# Output graph to file
f = open("w2v_graph", "w")
n_nodes = len(set(list(words_list)))
n_edges = len(g.items())
print("%d" % n_nodes, file=f)
for word_pair, occ in g.items():
print("%d %d %d" % (word_to_id[word_pair[0]], word_to_id[word_pair[1]], occ), file=f)
# Print stats
print("N_NODES=%d" % n_nodes)
print("N_EDGES=%d" % n_edges)
f.close()
|
Add w2v create graph from corpusfrom __future__ import print_function
import sys
from random import shuffle
DISTANCE = 10
with open(sys.argv[1], 'r') as corpus:
text = corpus.read()
text = text[:1000000]
words_list = list(set(text.split()))
word_to_id = {}
# Write word id mappings
for index, word in enumerate(list(set(words_list))):
word_to_id[word] = index
# Construct graph
g = {}
words = text.strip().split(" ")
lines = [words[i:i+DISTANCE] for i in range(len(words))]
for line in lines:
if len(line) < DISTANCE:
continue
#print("LINE: ", line)
first_word = line[0]
for other_word in line:
if other_word == first_word:
continue
a, b = tuple(sorted([first_word, other_word]))
if (a,b) not in g:
g[(a, b)] = 0
g[(a, b)] += 1
# Output graph to file
f = open("w2v_graph", "w")
n_nodes = len(set(list(words_list)))
n_edges = len(g.items())
print("%d" % n_nodes, file=f)
for word_pair, occ in g.items():
print("%d %d %d" % (word_to_id[word_pair[0]], word_to_id[word_pair[1]], occ), file=f)
# Print stats
print("N_NODES=%d" % n_nodes)
print("N_EDGES=%d" % n_edges)
f.close()
|
<commit_before><commit_msg>Add w2v create graph from corpus<commit_after>from __future__ import print_function
import sys
from random import shuffle
DISTANCE = 10
with open(sys.argv[1], 'r') as corpus:
text = corpus.read()
text = text[:1000000]
words_list = list(set(text.split()))
word_to_id = {}
# Write word id mappings
for index, word in enumerate(list(set(words_list))):
word_to_id[word] = index
# Construct graph
g = {}
words = text.strip().split(" ")
lines = [words[i:i+DISTANCE] for i in range(len(words))]
for line in lines:
if len(line) < DISTANCE:
continue
#print("LINE: ", line)
first_word = line[0]
for other_word in line:
if other_word == first_word:
continue
a, b = tuple(sorted([first_word, other_word]))
if (a,b) not in g:
g[(a, b)] = 0
g[(a, b)] += 1
# Output graph to file
f = open("w2v_graph", "w")
n_nodes = len(set(list(words_list)))
n_edges = len(g.items())
print("%d" % n_nodes, file=f)
for word_pair, occ in g.items():
print("%d %d %d" % (word_to_id[word_pair[0]], word_to_id[word_pair[1]], occ), file=f)
# Print stats
print("N_NODES=%d" % n_nodes)
print("N_EDGES=%d" % n_edges)
f.close()
|
|
bcb73da9d30d6737fdbdb4ba2378a612851ec9b9
|
lc0336_palindrome_pairs.py
|
lc0336_palindrome_pairs.py
|
"""336. Palindrome Pairs
Hard
URL: https://leetcode.com/problems/palindrome-pairs/
Given a list of unique words, find all pairs of distinct indices (i, j) in the given
list, so that the concatenation of the two words, i.e. words[i] + words[j] is a
palindrome.
Example 1:
Input: ["abcd","dcba","lls","s","sssll"]
Output: [[0,1],[1,0],[3,2],[2,4]]
Explanation: The palindromes are ["dcbaabcd","abcddcba","slls","llssssll"]
Example 2:
Input: ["bat","tab","cat"]
Output: [[0,1],[1,0]]
Explanation: The palindromes are ["battab","tabbat"]
"""
class SolutionWordIdxDictPrefixSuffixPalindrome(object):
def _isPalindrom(self, word):
return word == word[::-1]
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
Time complexity: O(n*m^2), where
- n: number of words
- m: max length of words
Space complexity: O(n+m).
"""
# For each word, check if word's prefix and suffix are palindromes.
n = len(words)
pal_pairs = []
# Use dict: word->idx for quick lookup.
word_idx_d = {word: i for i, word in enumerate(words)}
# Iterate through words, check word's prefix and reversed suffix (and vice versa).
for word, idx in word_idx_d.items():
m = len(word)
for j in range(m + 1):
prefix = word[:j]
suffix = word[j:]
if self._isPalindrom(prefix):
# If prefix and reversed suffix are palindrome,
# append (reversed suffix idx, word idx).
rev_suffix = suffix[::-1]
if rev_suffix != word and rev_suffix in word_idx_d:
pal_pairs.append([word_idx_d[rev_suffix], idx])
if j != m and self._isPalindrom(suffix):
# If suffix and reversed prefix are palindrome,
                # append (word idx, reversed prefix word idx).
rev_prefix = prefix[::-1]
if rev_prefix in word_idx_d:
pal_pairs.append([idx, word_idx_d[rev_prefix]])
return pal_pairs
def main():
# Output: [[0,1],[1,0],[3,2],[2,4]]
words = ["abcd","dcba","lls","s","sssll"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[0,1],[1,0]]
words = ["bat","tab","cat"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[2,0],[2,1]]
words = ["bot","t","to"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
if __name__ == '__main__':
main()
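# Quick sanity sketch mirroring Example 2 (hypothetical, not part of the
# solution; dict iteration order may vary, hence the sort):
#   pairs = SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(
#       ["bat", "tab", "cat"])
#   assert sorted(pairs) == [[0, 1], [1, 0]]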
|
Complete word idx dict + prefix/suffix pal sol
|
Complete word idx dict + prefix/suffix pal sol
|
Python
|
bsd-2-clause
|
bowen0701/algorithms_data_structures
|
Complete word idx dict + prefix/suffix pal sol
|
"""336. Palindrome Pairs
Hard
URL: https://leetcode.com/problems/palindrome-pairs/
Given a list of unique words, find all pairs of distinct indices (i, j) in the given
list, so that the concatenation of the two words, i.e. words[i] + words[j] is a
palindrome.
Example 1:
Input: ["abcd","dcba","lls","s","sssll"]
Output: [[0,1],[1,0],[3,2],[2,4]]
Explanation: The palindromes are ["dcbaabcd","abcddcba","slls","llssssll"]
Example 2:
Input: ["bat","tab","cat"]
Output: [[0,1],[1,0]]
Explanation: The palindromes are ["battab","tabbat"]
"""
class SolutionWordIdxDictPrefixSuffixPalindrome(object):
def _isPalindrom(self, word):
return word == word[::-1]
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
Time complexity: O(n*m^2), where
- n: number of words
- m: max length of words
Space complexity: O(n+m).
"""
# For each word, check if word's prefix and suffix are palindromes.
n = len(words)
pal_pairs = []
# Use dict: word->idx for quick lookup.
word_idx_d = {word: i for i, word in enumerate(words)}
# Iterate through words, check word's prefix and reversed suffix (and vice versa).
for word, idx in word_idx_d.items():
m = len(word)
for j in range(m + 1):
prefix = word[:j]
suffix = word[j:]
if self._isPalindrom(prefix):
# If prefix and reversed suffix are palindrome,
# append (reversed suffix idx, word idx).
rev_suffix = suffix[::-1]
if rev_suffix != word and rev_suffix in word_idx_d:
pal_pairs.append([word_idx_d[rev_suffix], idx])
if j != m and self._isPalindrom(suffix):
# If suffix and reversed prefix are palindrome,
                # append (word idx, reversed prefix word idx).
rev_prefix = prefix[::-1]
if rev_prefix in word_idx_d:
pal_pairs.append([idx, word_idx_d[rev_prefix]])
return pal_pairs
def main():
# Output: [[0,1],[1,0],[3,2],[2,4]]
words = ["abcd","dcba","lls","s","sssll"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[0,1],[1,0]]
words = ["bat","tab","cat"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[2,0],[2,1]]
words = ["bot","t","to"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Complete word idx dict + prefix/suffix pal sol<commit_after>
|
"""336. Palindrome Pairs
Hard
URL: https://leetcode.com/problems/palindrome-pairs/
Given a list of unique words, find all pairs of distinct indices (i, j) in the given
list, so that the concatenation of the two words, i.e. words[i] + words[j] is a
palindrome.
Example 1:
Input: ["abcd","dcba","lls","s","sssll"]
Output: [[0,1],[1,0],[3,2],[2,4]]
Explanation: The palindromes are ["dcbaabcd","abcddcba","slls","llssssll"]
Example 2:
Input: ["bat","tab","cat"]
Output: [[0,1],[1,0]]
Explanation: The palindromes are ["battab","tabbat"]
"""
class SolutionWordIdxDictPrefixSuffixPalindrome(object):
def _isPalindrom(self, word):
return word == word[::-1]
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
Time complexity: O(n*m^2), where
- n: number of words
- m: max length of words
Space complexity: O(n+m).
"""
# For each word, check if word's prefix and suffix are palindromes.
n = len(words)
pal_pairs = []
# Use dict: word->idx for quick lookup.
word_idx_d = {word: i for i, word in enumerate(words)}
# Iterate through words, check word's prefix and reversed suffix (and vice versa).
for word, idx in word_idx_d.items():
m = len(word)
for j in range(m + 1):
prefix = word[:j]
suffix = word[j:]
if self._isPalindrom(prefix):
# If prefix and reversed suffix are palindrome,
# append (reversed suffix idx, word idx).
rev_suffix = suffix[::-1]
if rev_suffix != word and rev_suffix in word_idx_d:
pal_pairs.append([word_idx_d[rev_suffix], idx])
if j != m and self._isPalindrom(suffix):
# If suffix and reversed prefix are palindrome,
                # append (word idx, reversed prefix word idx).
rev_prefix = prefix[::-1]
if rev_prefix in word_idx_d:
pal_pairs.append([idx, word_idx_d[rev_prefix]])
return pal_pairs
def main():
# Output: [[0,1],[1,0],[3,2],[2,4]]
words = ["abcd","dcba","lls","s","sssll"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[0,1],[1,0]]
words = ["bat","tab","cat"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[2,0],[2,1]]
words = ["bot","t","to"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
if __name__ == '__main__':
main()
|
Complete word idx dict + prefix/suffix pal sol"""336. Palindrome Pairs
Hard
URL: https://leetcode.com/problems/palindrome-pairs/
Given a list of unique words, find all pairs of distinct indices (i, j) in the given
list, so that the concatenation of the two words, i.e. words[i] + words[j] is a
palindrome.
Example 1:
Input: ["abcd","dcba","lls","s","sssll"]
Output: [[0,1],[1,0],[3,2],[2,4]]
Explanation: The palindromes are ["dcbaabcd","abcddcba","slls","llssssll"]
Example 2:
Input: ["bat","tab","cat"]
Output: [[0,1],[1,0]]
Explanation: The palindromes are ["battab","tabbat"]
"""
class SolutionWordIdxDictPrefixSuffixPalindrome(object):
def _isPalindrom(self, word):
return word == word[::-1]
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
Time complexity: O(n*m^2), where
- n: number of words
- m: max length of words
Space complexity: O(n+m).
"""
# For each word, check if word's prefix and suffix are palindromes.
n = len(words)
pal_pairs = []
# Use dict: word->idx for quick lookup.
word_idx_d = {word: i for i, word in enumerate(words)}
# Iterate through words, check word's prefix and reversed suffix (and vice versa).
for word, idx in word_idx_d.items():
m = len(word)
for j in range(m + 1):
prefix = word[:j]
suffix = word[j:]
if self._isPalindrom(prefix):
# If prefix and reversed suffix are palindrome,
# append (reversed suffix idx, word idx).
rev_suffix = suffix[::-1]
if rev_suffix != word and rev_suffix in word_idx_d:
pal_pairs.append([word_idx_d[rev_suffix], idx])
if j != m and self._isPalindrom(suffix):
# If suffix and reversed prefix are palindrome,
                # append (word idx, reversed prefix word idx).
rev_prefix = prefix[::-1]
if rev_prefix in word_idx_d:
pal_pairs.append([idx, word_idx_d[rev_prefix]])
return pal_pairs
def main():
# Output: [[0,1],[1,0],[3,2],[2,4]]
words = ["abcd","dcba","lls","s","sssll"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[0,1],[1,0]]
words = ["bat","tab","cat"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[2,0],[2,1]]
words = ["bot","t","to"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Complete word idx dict + prefix/suffix pal sol<commit_after>"""336. Palindrome Pairs
Hard
URL: https://leetcode.com/problems/palindrome-pairs/
Given a list of unique words, find all pairs of distinct indices (i, j) in the given
list, so that the concatenation of the two words, i.e. words[i] + words[j] is a
palindrome.
Example 1:
Input: ["abcd","dcba","lls","s","sssll"]
Output: [[0,1],[1,0],[3,2],[2,4]]
Explanation: The palindromes are ["dcbaabcd","abcddcba","slls","llssssll"]
Example 2:
Input: ["bat","tab","cat"]
Output: [[0,1],[1,0]]
Explanation: The palindromes are ["battab","tabbat"]
"""
class SolutionWordIdxDictPrefixSuffixPalindrome(object):
def _isPalindrom(self, word):
return word == word[::-1]
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
Time complexity: O(n*m^2), where
- n: number of words
- m: max length of words
Space complexity: O(n+m).
"""
# For each word, check if word's prefix and suffix are palindromes.
n = len(words)
pal_pairs = []
# Use dict: word->idx for quick lookup.
word_idx_d = {word: i for i, word in enumerate(words)}
# Iterate through words, check word's prefix and reversed suffix (and vice versa).
for word, idx in word_idx_d.items():
m = len(word)
for j in range(m + 1):
prefix = word[:j]
suffix = word[j:]
if self._isPalindrom(prefix):
# If prefix and reversed suffix are palindrome,
# append (reversed suffix idx, word idx).
rev_suffix = suffix[::-1]
if rev_suffix != word and rev_suffix in word_idx_d:
pal_pairs.append([word_idx_d[rev_suffix], idx])
if j != m and self._isPalindrom(suffix):
# If suffix and reversed prefix are palindrome,
                # append (word idx, reversed prefix word idx).
rev_prefix = prefix[::-1]
if rev_prefix in word_idx_d:
pal_pairs.append([idx, word_idx_d[rev_prefix]])
return pal_pairs
def main():
# Output: [[0,1],[1,0],[3,2],[2,4]]
words = ["abcd","dcba","lls","s","sssll"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[0,1],[1,0]]
words = ["bat","tab","cat"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
# Output: [[2,0],[2,1]]
words = ["bot","t","to"]
print SolutionWordIdxDictPrefixSuffixPalindrome().palindromePairs(words)
if __name__ == '__main__':
main()
|
|
74dab4e4e7c87e247659bff6cfd4e333b7cd71f7
|
chapter03/triangleArea.py
|
chapter03/triangleArea.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
print 'Insert two points, separated with commas: '
xy = input('Insert first point: ')
x2y2 = input('Insert second point: ')
def substractVectors(a, b):
return (a[0] - b[0], a[1] - b[1])
def normalize(x, y):
return math.sqrt(x**2 + y**2)
print normalize(*xy)
print normalize(*x2y2)
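# Combining the two helpers (a sketch): the Euclidean distance between the
# two points is the norm of their difference:
# print normalize(*substractVectors(xy, x2y2))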
|
Add normalize and vector subtraction functions
|
Add normalize and vector subtraction functions
|
Python
|
apache-2.0
|
MindCookin/python-exercises
|
Add normalize and vector subtraction functions
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
print 'Insert two points, separated with commas: '
xy = input('Insert first point: ')
x2y2 = input('Insert second point: ')
def substractVectors(a, b):
return (a[0] - b[0], a[1] - b[1])
def normalize(x, y):
return math.sqrt(x**2 + y**2)
print normalize(*xy)
print normalize(*x2y2)
|
<commit_before><commit_msg>Add normalize and vector subtraction functions<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
print 'Insert two points, separated with commas: '
xy = input('Insert first point: ')
x2y2 = input('Insert second point: ')
def substractVectors(a, b):
return (a[0] - b[0], a[1] - b[1])
def normalize(x, y):
return math.sqrt(x**2 + y**2)
print normalize(*xy)
print normalize(*x2y2)
|
Add normalize and vector subtraction functions#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
print 'Insert two points, separated with commas: '
xy = input('Insert first point: ')
x2y2 = input('Insert second point: ')
def substractVectors(a, b):
return (a[0] - b[0], a[1] - b[1])
def normalize(x, y):
return math.sqrt(x**2 + y**2)
print normalize(*xy)
print normalize(*x2y2)
|
<commit_before><commit_msg>Add normalize and vector subtraction functions<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
print 'Insert two points, separated with commas: '
xy = input('Insert first point: ')
x2y2 = input('Insert second point: ')
def substractVectors(a, b):
return (a[0] - b[0], a[1] - b[1])
def normalize(x, y):
return math.sqrt(x**2 + y**2)
print normalize(*xy)
print normalize(*x2y2)
|
|
e21d3a1da1d20a341ae165c0fa3605248744ce7e
|
altair/examples/cumulative_count_chart.py
|
altair/examples/cumulative_count_chart.py
|
"""
Cumulative Count Chart
----------------------
This example shows an area chart with cumulative count.
Adapted from https://vega.github.io/vega-lite/examples/area_cumulative_freq.html
"""
# category: area charts
import altair as alt
from vega_datasets import data
source = data.movies.url
alt.Chart(source).transform_window(
cumulative_count="count(count)",
sort=[{"field": "IMDB_Rating"}],
).mark_area().encode(
x="IMDB_Rating:Q",
y="cumulative_count:Q"
)
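# To inspect the result outside a notebook, one could assign the chart above
# to a variable and call Altair's standard save(), e.g.
#   chart.save("cumulative_count.html")  # filename is illustrative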
|
Add example - Cumulative count
|
Add example - Cumulative count
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
Add example - Cumulative count
|
"""
Cumulative Count Chart
----------------------
This example shows an area chart with cumulative count.
Adapted from https://vega.github.io/vega-lite/examples/area_cumulative_freq.html
"""
# category: area charts
import altair as alt
from vega_datasets import data
source = data.movies.url
alt.Chart(source).transform_window(
cumulative_count="count(count)",
sort=[{"field": "IMDB_Rating"}],
).mark_area().encode(
x="IMDB_Rating:Q",
y="cumulative_count:Q"
)
|
<commit_before><commit_msg>Add example - Cumulative count<commit_after>
|
"""
Cumulative Count Chart
----------------------
This example shows an area chart with cumulative count.
Adapted from https://vega.github.io/vega-lite/examples/area_cumulative_freq.html
"""
# category: area charts
import altair as alt
from vega_datasets import data
source = data.movies.url
alt.Chart(source).transform_window(
cumulative_count="count(count)",
sort=[{"field": "IMDB_Rating"}],
).mark_area().encode(
x="IMDB_Rating:Q",
y="cumulative_count:Q"
)
|
Add example - Cumulative count"""
Cumulative Count Chart
----------------------
This example shows an area chart with cumulative count.
Adapted from https://vega.github.io/vega-lite/examples/area_cumulative_freq.html
"""
# category: area charts
import altair as alt
from vega_datasets import data
source = data.movies.url
alt.Chart(source).transform_window(
cumulative_count="count(count)",
sort=[{"field": "IMDB_Rating"}],
).mark_area().encode(
x="IMDB_Rating:Q",
y="cumulative_count:Q"
)
|
<commit_before><commit_msg>Add example - Cumulative count<commit_after>"""
Cumulative Count Chart
----------------------
This example shows an area chart with cumulative count.
Adapted from https://vega.github.io/vega-lite/examples/area_cumulative_freq.html
"""
# category: area charts
import altair as alt
from vega_datasets import data
source = data.movies.url
alt.Chart(source).transform_window(
cumulative_count="count(count)",
sort=[{"field": "IMDB_Rating"}],
).mark_area().encode(
x="IMDB_Rating:Q",
y="cumulative_count:Q"
)
|
|
1a1a8c00e3dc4101be78a69b3bae7d27fbb51b39
|
ghpythonremote/examples/CPython_to_GH_manual.py
|
ghpythonremote/examples/CPython_to_GH_manual.py
|
import rpyc
c = rpyc.utils.factory.connect(
"localhost",
18871,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
rghcomp = c.root.get_component
rgh = c
Rhino = rgh.modules.Rhino
rs = rgh.modules.rhinoscriptsyntax
readopt = Rhino.FileIO.FileReadOptions()
readopt.BatchMode = True
Rhino.RhinoDoc.ReadFile(
r"C:\Users\pcuvil\CODE\ghpythonremote\ghpythonremote\examples\curves.3dm", readopt
)
type_curve = Rhino.DocObjects.ObjectType.Curve
curves = Rhino.RhinoDoc.ActiveDoc.Objects.FindByObjectType(type_curve)
curves_id = tuple(c.Id for c in curves)
gh_curves = rs.coerceguidlist(curves_id)
area = rghcomp("Area", is_cluster_component=False)
print(sum(area(gh_curves)[0]))
########################################
### Below is what to paste in Rhino Python
from ghpythonremote import rpyc
from rpyc.utils.server import OneShotServer
class GhcompService(rpyc.ClassicService):
def on_connect(self, conn):
print("Incoming connection.")
super(GhcompService, self).on_connect(conn)
import ghpythonlib.components as ghcomp
self.ghcomp = ghcomp
def on_disconnect(self, conn):
print("Disconnected.")
def get_component(self, component_name, is_cluster_component=False):
component = getattr(self.ghcomp, component_name)
if is_cluster_component:
component = getattr(
component, component_name
)
# TODO: improve ghcomp to get clusters the same way we get compiled
# components, thus removing the need for a custom getter
return component
server = OneShotServer(
GhcompService, hostname="localhost", port=18871, listener_timeout=None
)
server.start()
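# Hypothetical smoke test from the CPython side once the server above is
# running inside Rhino (rs is the rhinoscriptsyntax proxy bound earlier):
#   print(rs.Distance((0, 0, 0), (3, 4, 0)))  # expect 5.0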
|
Add a debugging file connecting Python to GH
|
Add a debugging file connecting Python to GH
|
Python
|
mit
|
pilcru/ghpythonremote,Digital-Structures/ghpythonremote
|
Add a debugging file connecting Python to GH
|
import rpyc
c = rpyc.utils.factory.connect(
"localhost",
18871,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
rghcomp = c.root.get_component
rgh = c
Rhino = rgh.modules.Rhino
rs = rgh.modules.rhinoscriptsyntax
readopt = Rhino.FileIO.FileReadOptions()
readopt.BatchMode = True
Rhino.RhinoDoc.ReadFile(
r"C:\Users\pcuvil\CODE\ghpythonremote\ghpythonremote\examples\curves.3dm", readopt
)
type_curve = Rhino.DocObjects.ObjectType.Curve
curves = Rhino.RhinoDoc.ActiveDoc.Objects.FindByObjectType(type_curve)
curves_id = tuple(c.Id for c in curves)
gh_curves = rs.coerceguidlist(curves_id)
area = rghcomp("Area", is_cluster_component=False)
print(sum(area(gh_curves)[0]))
########################################
### Below is what to paste in Rhino Python
from ghpythonremote import rpyc
from rpyc.utils.server import OneShotServer
class GhcompService(rpyc.ClassicService):
def on_connect(self, conn):
print("Incoming connection.")
super(GhcompService, self).on_connect(conn)
import ghpythonlib.components as ghcomp
self.ghcomp = ghcomp
def on_disconnect(self, conn):
print("Disconnected.")
def get_component(self, component_name, is_cluster_component=False):
component = getattr(self.ghcomp, component_name)
if is_cluster_component:
component = getattr(
component, component_name
)
# TODO: improve ghcomp to get clusters the same way we get compiled
# components, thus removing the need for a custom getter
return component
server = OneShotServer(
GhcompService, hostname="localhost", port=18871, listener_timeout=None
)
server.start()
|
<commit_before><commit_msg>Add a debugging file connecting Python to GH<commit_after>
|
import rpyc
c = rpyc.utils.factory.connect(
"localhost",
18871,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
rghcomp = c.root.get_component
rgh = c
Rhino = rgh.modules.Rhino
rs = rgh.modules.rhinoscriptsyntax
readopt = Rhino.FileIO.FileReadOptions()
readopt.BatchMode = True
Rhino.RhinoDoc.ReadFile(
r"C:\Users\pcuvil\CODE\ghpythonremote\ghpythonremote\examples\curves.3dm", readopt
)
type_curve = Rhino.DocObjects.ObjectType.Curve
curves = Rhino.RhinoDoc.ActiveDoc.Objects.FindByObjectType(type_curve)
curves_id = tuple(c.Id for c in curves)
gh_curves = rs.coerceguidlist(curves_id)
area = rghcomp("Area", is_cluster_component=False)
print(sum(area(gh_curves)[0]))
########################################
### Below is what to paste in Rhino Python
from ghpythonremote import rpyc
from rpyc.utils.server import OneShotServer
class GhcompService(rpyc.ClassicService):
def on_connect(self, conn):
print("Incoming connection.")
super(GhcompService, self).on_connect(conn)
import ghpythonlib.components as ghcomp
self.ghcomp = ghcomp
def on_disconnect(self, conn):
print("Disconnected.")
def get_component(self, component_name, is_cluster_component=False):
component = getattr(self.ghcomp, component_name)
if is_cluster_component:
component = getattr(
component, component_name
)
# TODO: improve ghcomp to get clusters the same way we get compiled
# components, thus removing the need for a custom getter
return component
server = OneShotServer(
GhcompService, hostname="localhost", port=18871, listener_timeout=None
)
server.start()
|
Add a debugging file connecting Python to GHimport rpyc
c = rpyc.utils.factory.connect(
"localhost",
18871,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
rghcomp = c.root.get_component
rgh = c
Rhino = rgh.modules.Rhino
rs = rgh.modules.rhinoscriptsyntax
readopt = Rhino.FileIO.FileReadOptions()
readopt.BatchMode = True
Rhino.RhinoDoc.ReadFile(
r"C:\Users\pcuvil\CODE\ghpythonremote\ghpythonremote\examples\curves.3dm", readopt
)
type_curve = Rhino.DocObjects.ObjectType.Curve
curves = Rhino.RhinoDoc.ActiveDoc.Objects.FindByObjectType(type_curve)
curves_id = tuple(c.Id for c in curves)
gh_curves = rs.coerceguidlist(curves_id)
area = rghcomp("Area", is_cluster_component=False)
print(sum(area(gh_curves)[0]))
########################################
### Below is what to paste in Rhino Python
from ghpythonremote import rpyc
from rpyc.utils.server import OneShotServer
class GhcompService(rpyc.ClassicService):
def on_connect(self, conn):
print("Incoming connection.")
super(GhcompService, self).on_connect(conn)
import ghpythonlib.components as ghcomp
self.ghcomp = ghcomp
def on_disconnect(self, conn):
print("Disconnected.")
def get_component(self, component_name, is_cluster_component=False):
component = getattr(self.ghcomp, component_name)
if is_cluster_component:
component = getattr(
component, component_name
)
# TODO: improve ghcomp to get clusters the same way we get compiled
# components, thus removing the need for a custom getter
return component
server = OneShotServer(
GhcompService, hostname="localhost", port=18871, listener_timeout=None
)
server.start()
|
<commit_before><commit_msg>Add a debugging file connecting Python to GH<commit_after>import rpyc
c = rpyc.utils.factory.connect(
"localhost",
18871,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
rghcomp = c.root.get_component
rgh = c
Rhino = rgh.modules.Rhino
rs = rgh.modules.rhinoscriptsyntax
readopt = Rhino.FileIO.FileReadOptions()
readopt.BatchMode = True
Rhino.RhinoDoc.ReadFile(
r"C:\Users\pcuvil\CODE\ghpythonremote\ghpythonremote\examples\curves.3dm", readopt
)
type_curve = Rhino.DocObjects.ObjectType.Curve
curves = Rhino.RhinoDoc.ActiveDoc.Objects.FindByObjectType(type_curve)
curves_id = tuple(c.Id for c in curves)
gh_curves = rs.coerceguidlist(curves_id)
area = rghcomp("Area", is_cluster_component=False)
print(sum(area(gh_curves)[0]))
########################################
### Below is what to paste in Rhino Python
from ghpythonremote import rpyc
from rpyc.utils.server import OneShotServer
class GhcompService(rpyc.ClassicService):
def on_connect(self, conn):
print("Incoming connection.")
super(GhcompService, self).on_connect(conn)
import ghpythonlib.components as ghcomp
self.ghcomp = ghcomp
def on_disconnect(self, conn):
print("Disconnected.")
def get_component(self, component_name, is_cluster_component=False):
component = getattr(self.ghcomp, component_name)
if is_cluster_component:
component = getattr(
component, component_name
)
# TODO: improve ghcomp to get clusters the same way we get compiled
# components, thus removing the need for a custom getter
return component
server = OneShotServer(
GhcompService, hostname="localhost", port=18871, listener_timeout=None
)
server.start()
|
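The two snippets in this record are the client and server halves of one rpyc session. Below is a minimal stand-alone sketch of the same handshake with the Rhino/Grasshopper specifics stripped out, so it runs anywhere rpyc is installed; the port and connect() arguments match the record, while EchoService and its echo method are hypothetical stand-ins for GhcompService and get_component.

import threading

import rpyc
from rpyc.utils.server import OneShotServer

class EchoService(rpyc.ClassicService):
    # Hypothetical stand-in for GhcompService: methods named
    # "exposed_*" become callable on conn.root at the other end.
    def exposed_echo(self, value):
        return value

# OneShotServer binds immediately and serves a single connection,
# mirroring how the Rhino-side server in the record is used.
server = OneShotServer(
    EchoService, hostname="localhost", port=18871, listener_timeout=None
)
threading.Thread(target=server.start, daemon=True).start()

conn = rpyc.utils.factory.connect(
    "localhost",
    18871,
    service=rpyc.core.service.ClassicService,
    config={"sync_request_timeout": None},
)
print(conn.root.echo("hello"))  # round-trips through the server
conn.close()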
|
d9db8056450528f10b9bfaa9f6226486108ac37d
|
creation/lib/cgWConsts.py
|
creation/lib/cgWConsts.py
|
####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import string
import time
start_time_tuple=time.localtime()
TIMESTR=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
del start_time_tuple
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.1 2007/10/12 20:20:26 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
|
Put constants into a dedicated module
|
Put constants into a dedicated module
|
Python
|
bsd-3-clause
|
bbockelm/glideinWMS,holzman/glideinwms-old,holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS,bbockelm/glideinWMS,holzman/glideinwms-old
|
Put constants into a dedicated module
|
####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import string
import time
start_time_tuple=time.localtime()
TIMESTR=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
del start_time_tuple
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.1 2007/10/12 20:20:26 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
|
<commit_before><commit_msg>Put constants into a dedicated module<commit_after>
|
####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import string
import time
start_time_tuple=time.localtime()
TIMESTR=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
del start_time_tuple
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.1 2007/10/12 20:20:26 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
|
Put constants into a dedicated module####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import string
import time
start_time_tuple=time.localtime()
TIMESTR=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
del start_time_tuple
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.1 2007/10/12 20:20:26 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
|
<commit_before><commit_msg>Put constants into a dedicated module<commit_after>####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import string
import time
start_time_tuple=time.localtime()
TIMESTR=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
del start_time_tuple
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.1 2007/10/12 20:20:26 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
|
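The one subtle piece of the module above is the TIMESTR encoding: it packs the build time into five characters by indexing into string.printable, which starts with the digits, then lowercase, then uppercase letters, so indexes up to 61 stay alphanumeric (hence the "until ~2060" comment for year - 2000). A Python 3 rendering of the same scheme, for illustration only (note // where the Python 2 original relies on integer /):

import string
import time

t = time.localtime()
timestr = (string.printable[t.tm_year - 2000]  # year: 2024 -> index 24 -> 'o'
           + string.printable[t.tm_mon]        # month: 1-12
           + string.printable[t.tm_mday]       # day: 1-31
           + string.printable[t.tm_hour]       # hour: 0-23
           + string.digits[t.tm_min // 10])    # first minute digit: 0-5
print(timestr)  # five characters, e.g. 'oc7n2'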
|
3756fa9cef258e91ebd8d344f6773cfc2ac537dd
|
benchmarks/bench_misc.py
|
benchmarks/bench_misc.py
|
"""
Miscellaneous benchmarks.
"""
import subprocess
import sys
import time
from numba import jit
class InitializationTime:
# Measure wall clock time, not CPU time of calling process
timer = time.time
number = 1
repeat = 10
def time_new_process_import_numba(self):
subprocess.check_call([sys.executable, "-c", "from numba import jit"])
|
Add a benchmark of import time
|
Add a benchmark of import time
|
Python
|
bsd-2-clause
|
numba/numba-benchmark,gmarkall/numba-benchmark
|
Add a benchmark of import time
|
"""
Miscellaneous benchmarks.
"""
import subprocess
import sys
import time
from numba import jit
class InitializationTime:
# Measure wall clock time, not CPU time of calling process
timer = time.time
number = 1
repeat = 10
def time_new_process_import_numba(self):
subprocess.check_call([sys.executable, "-c", "from numba import jit"])
|
<commit_before><commit_msg>Add a benchmark of import time<commit_after>
|
"""
Miscellaneous benchmarks.
"""
import subprocess
import sys
import time
from numba import jit
class InitializationTime:
# Measure wall clock time, not CPU time of calling process
timer = time.time
number = 1
repeat = 10
def time_new_process_import_numba(self):
subprocess.check_call([sys.executable, "-c", "from numba import jit"])
|
Add a benchmark of import time"""
Miscellaneous benchmarks.
"""
import subprocess
import sys
import time
from numba import jit
class InitializationTime:
# Measure wall clock time, not CPU time of calling process
timer = time.time
number = 1
repeat = 10
def time_new_process_import_numba(self):
subprocess.check_call([sys.executable, "-c", "from numba import jit"])
|
<commit_before><commit_msg>Add a benchmark of import time<commit_after>"""
Miscellaneous benchmarks.
"""
import subprocess
import sys
import time
from numba import jit
class InitializationTime:
# Measure wall clock time, not CPU time of calling process
timer = time.time
number = 1
repeat = 10
def time_new_process_import_numba(self):
subprocess.check_call([sys.executable, "-c", "from numba import jit"])
|
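The timer/number/repeat attributes follow asv (airspeed velocity) benchmark conventions: wall-clock timer, one call per measurement, ten repeats. Outside the harness the measurement is just a timed subprocess; a rough stand-alone equivalent:

import subprocess
import sys
import time

start = time.time()  # wall clock, as in the benchmark class above
subprocess.check_call([sys.executable, "-c", "from numba import jit"])
print("cold import took %.2fs" % (time.time() - start))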
|
39039b8a6584984d6778b271a4b9c8a181198cd3
|
corehq/apps/hqcase/tests/test_bulk.py
|
corehq/apps/hqcase/tests/test_bulk.py
|
import uuid
from unittest import mock
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from corehq.apps.hqcase.bulk import CaseBulkDB
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import XFormInstance
from corehq.form_processor.tests.utils import FormProcessorTestUtils
@mock.patch('corehq.apps.hqcase.bulk.CASEBLOCK_CHUNKSIZE', new=5)
class TestUpdateCases(TestCase):
domain = 'test_bulk_update_cases'
def tearDown(self):
FormProcessorTestUtils.delete_all_xforms()
FormProcessorTestUtils.delete_all_cases()
super().tearDown()
def test(self):
with CaseBulkDB(self.domain, 'my_user_id', 'my_device_id') as bulk_db:
for i in range(1, 18):
bulk_db.save(CaseBlock(
create=True,
case_id=str(uuid.uuid4()),
case_type='patient',
case_name=f"case_{i}",
update={'phase': '1'},
))
self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain)), 4)
case_accessor = CaseAccessors(self.domain)
self.assertEqual(len(case_accessor.get_case_ids_in_domain()), 17)
|
Add basic test for CaseBulkDB
|
Add basic test for CaseBulkDB
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add basic test for CaseBulkDB
|
import uuid
from unittest import mock
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from corehq.apps.hqcase.bulk import CaseBulkDB
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import XFormInstance
from corehq.form_processor.tests.utils import FormProcessorTestUtils
@mock.patch('corehq.apps.hqcase.bulk.CASEBLOCK_CHUNKSIZE', new=5)
class TestUpdateCases(TestCase):
domain = 'test_bulk_update_cases'
def tearDown(self):
FormProcessorTestUtils.delete_all_xforms()
FormProcessorTestUtils.delete_all_cases()
super().tearDown()
def test(self):
with CaseBulkDB(self.domain, 'my_user_id', 'my_device_id') as bulk_db:
for i in range(1, 18):
bulk_db.save(CaseBlock(
create=True,
case_id=str(uuid.uuid4()),
case_type='patient',
case_name=f"case_{i}",
update={'phase': '1'},
))
self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain)), 4)
case_accessor = CaseAccessors(self.domain)
self.assertEqual(len(case_accessor.get_case_ids_in_domain()), 17)
|
<commit_before><commit_msg>Add basic test for CaseBulkDB<commit_after>
|
import uuid
from unittest import mock
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from corehq.apps.hqcase.bulk import CaseBulkDB
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import XFormInstance
from corehq.form_processor.tests.utils import FormProcessorTestUtils
@mock.patch('corehq.apps.hqcase.bulk.CASEBLOCK_CHUNKSIZE', new=5)
class TestUpdateCases(TestCase):
domain = 'test_bulk_update_cases'
def tearDown(self):
FormProcessorTestUtils.delete_all_xforms()
FormProcessorTestUtils.delete_all_cases()
super().tearDown()
def test(self):
with CaseBulkDB(self.domain, 'my_user_id', 'my_device_id') as bulk_db:
for i in range(1, 18):
bulk_db.save(CaseBlock(
create=True,
case_id=str(uuid.uuid4()),
case_type='patient',
case_name=f"case_{i}",
update={'phase': '1'},
))
self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain)), 4)
case_accessor = CaseAccessors(self.domain)
self.assertEqual(len(case_accessor.get_case_ids_in_domain()), 17)
|
Add basic test for CaseBulkDBimport uuid
from unittest import mock
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from corehq.apps.hqcase.bulk import CaseBulkDB
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import XFormInstance
from corehq.form_processor.tests.utils import FormProcessorTestUtils
@mock.patch('corehq.apps.hqcase.bulk.CASEBLOCK_CHUNKSIZE', new=5)
class TestUpdateCases(TestCase):
domain = 'test_bulk_update_cases'
def tearDown(self):
FormProcessorTestUtils.delete_all_xforms()
FormProcessorTestUtils.delete_all_cases()
super().tearDown()
def test(self):
with CaseBulkDB(self.domain, 'my_user_id', 'my_device_id') as bulk_db:
for i in range(1, 18):
bulk_db.save(CaseBlock(
create=True,
case_id=str(uuid.uuid4()),
case_type='patient',
case_name=f"case_{i}",
update={'phase': '1'},
))
self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain)), 4)
case_accessor = CaseAccessors(self.domain)
self.assertEqual(len(case_accessor.get_case_ids_in_domain()), 17)
|
<commit_before><commit_msg>Add basic test for CaseBulkDB<commit_after>import uuid
from unittest import mock
from django.test import TestCase
from casexml.apps.case.mock import CaseBlock
from corehq.apps.hqcase.bulk import CaseBulkDB
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import XFormInstance
from corehq.form_processor.tests.utils import FormProcessorTestUtils
@mock.patch('corehq.apps.hqcase.bulk.CASEBLOCK_CHUNKSIZE', new=5)
class TestUpdateCases(TestCase):
domain = 'test_bulk_update_cases'
def tearDown(self):
FormProcessorTestUtils.delete_all_xforms()
FormProcessorTestUtils.delete_all_cases()
super().tearDown()
def test(self):
with CaseBulkDB(self.domain, 'my_user_id', 'my_device_id') as bulk_db:
for i in range(1, 18):
bulk_db.save(CaseBlock(
create=True,
case_id=str(uuid.uuid4()),
case_type='patient',
case_name=f"case_{i}",
update={'phase': '1'},
))
self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain)), 4)
case_accessor = CaseAccessors(self.domain)
self.assertEqual(len(case_accessor.get_case_ids_in_domain()), 17)
|
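The assertions encode the chunking arithmetic: with CASEBLOCK_CHUNKSIZE patched to 5, seventeen saved blocks flush as 5 + 5 + 5 + 2, i.e. four forms, while all 17 cases are created. A toy sketch of that buffer-and-flush pattern (the real CaseBulkDB lives in corehq.apps.hqcase.bulk; this stand-in only mirrors the counting):

class ChunkedWriter(object):
    """Toy bulk writer that flushes every chunk_size saves."""

    def __init__(self, chunk_size):
        self.chunk_size = chunk_size
        self.buffer = []
        self.flushes = 0  # one "form" per flush

    def save(self, item):
        self.buffer.append(item)
        if len(self.buffer) >= self.chunk_size:
            self.flush()

    def flush(self):
        if self.buffer:
            self.flushes += 1
            self.buffer = []

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.flush()  # final partial chunk is written on exit

with ChunkedWriter(chunk_size=5) as writer:
    for i in range(1, 18):  # 17 items, matching the test
        writer.save(i)
print(writer.flushes)  # 4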
|
e3845e03ea1f3dd393cd077f7c2b3d2ce31b86a9
|
mysite/scripts/remove_numbers_from_locations.py
|
mysite/scripts/remove_numbers_from_locations.py
|
import re
import mysite
Person = mysite.profile.models.Person
people_with_weird_locations = Person.objects.filter(location_display_name__regex=', [0-9][0-9],')
for p in people_with_weird_locations:
location_pieces = re.split(r', \d\d', p.location_display_name)
unweirded_location = "".join(location_pieces)
p.location_display_name = unweirded_location
p.save()
|
Add script to remove unnecessary numbers from people's locations.
|
Add script to remove unnecessary numbers from people's locations.
|
Python
|
agpl-3.0
|
SnappleCap/oh-mainline,ehashman/oh-mainline,mzdaniel/oh-mainline,jledbetter/openhatch,sudheesh001/oh-mainline,openhatch/oh-mainline,jledbetter/openhatch,SnappleCap/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,waseem18/oh-mainline,vipul-sharma20/oh-mainline,jledbetter/openhatch,ojengwa/oh-mainline,waseem18/oh-mainline,eeshangarg/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,campbe13/openhatch,eeshangarg/oh-mainline,ehashman/oh-mainline,SnappleCap/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,Changaco/oh-mainline,mzdaniel/oh-mainline,willingc/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,openhatch/oh-mainline,openhatch/oh-mainline,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,willingc/oh-mainline,mzdaniel/oh-mainline,mzdaniel/oh-mainline,eeshangarg/oh-mainline,willingc/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,campbe13/openhatch,vipul-sharma20/oh-mainline,moijes12/oh-mainline,onceuponatimeforever/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,eeshangarg/oh-mainline,heeraj123/oh-mainline,jledbetter/openhatch,campbe13/openhatch,ojengwa/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,Changaco/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,ojengwa/oh-mainline,sudheesh001/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,sudheesh001/oh-mainline,SnappleCap/oh-mainline,Changaco/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,jledbetter/openhatch,ojengwa/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,mzdaniel/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,ehashman/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,nirmeshk/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,moijes12/oh-mainline,heeraj123/oh-mainline
|
Add script to remove unnecessary numbers from people's locations.
|
import re
import mysite
Person = mysite.profile.models.Person
people_with_weird_locations = Person.objects.filter(location_display_name__regex=', [0-9][0-9],')
for p in people_with_weird_locations:
location_pieces = re.split(r', \d\d', p.location_display_name)
unweirded_location = "".join(location_pieces)
p.location_display_name = unweirded_location
p.save()
|
<commit_before><commit_msg>Add script to remove unnecessary numbers from people's locations.<commit_after>
|
import re
import mysite
Person = mysite.profile.models.Person
people_with_weird_locations = Person.objects.filter(location_display_name__regex=', [0-9][0-9],')
for p in people_with_weird_locations:
location_pieces = re.split(r', \d\d', p.location_display_name)
unweirded_location = "".join(location_pieces)
p.location_display_name = unweirded_location
p.save()
|
Add script to remove unnecessary numbers from people's locations.import re
import mysite
Person = mysite.profile.models.Person
people_with_weird_locations = Person.objects.filter(location_display_name__regex=', [0-9][0-9],')
for p in people_with_weird_locations:
location_pieces = re.split(r', \d\d', p.location_display_name)
unweirded_location = "".join(location_pieces)
p.location_display_name = unweirded_location
p.save()
|
<commit_before><commit_msg>Add script to remove unnecessary numbers from people's locations.<commit_after>import re
import mysite
Person = mysite.profile.models.Person
people_with_weird_locations = Person.objects.filter(location_display_name__regex=', [0-9][0-9],')
for p in people_with_weird_locations:
location_pieces = re.split(r', \d\d', p.location_display_name)
unweirded_location = "".join(location_pieces)
p.location_display_name = unweirded_location
p.save()
|
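For reference, the effect of that split on a made-up location string: re.split drops each ", NN" fragment and the join stitches the rest back together.

import re

location = "Boston, 02, MA, United States"  # hypothetical sample value
print("".join(re.split(r", \d\d", location)))
# -> 'Boston, MA, United States'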
|
642b777f5eabc02abd54208e3895c0b1898401db
|
edit-cosine-cluster.py
|
edit-cosine-cluster.py
|
#!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import json
import sys
import csv
row=[]
csvPath = sys.argv[1] #Input Path to csv file
with open(csvPath,"r") as f:
lines = csv.reader(f.read().splitlines(), delimiter=' ')
for line in lines:
row.append(line)
data={}
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
data[column[0]]=[]
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
second={}
second["name"]=column[1]+" "+column[2]
second["size"]=column[2]
data[column[0]].append(second)
clusterList = []
i=0
for elem in data.keys():
first={}
first["name"]="cluster "+str(i)
first["children"]=data[elem]
clusterList.append(first)
i+=1
print json.dumps(clusterList, sort_keys=True, indent=4, separators=(',', ': '))
clusterStruct = {"name":"clusters", "children":clusterList}
with open("clusters.json", "w") as f: #Pass the json file as input to cluster-d3.html or dynamic-cluster.html
f.write(json.dumps(clusterStruct, sort_keys=True, indent=4, separators=(',', ': ')))
|
Convert csv to json for clusters
|
Convert csv to json for clusters
Convert csv to json for cluster-d3.html(or dynamic-cluster.html)
|
Python
|
apache-2.0
|
YongchaoShang/tika-img-similarity,chrismattmann/tika-similarity,harsham05/tika-similarity,chrismattmann/tika-similarity,chrismattmann/tika-img-similarity,YongchaoShang/tika-img-similarity,harsham05/tika-similarity,chrismattmann/tika-img-similarity
|
Convert csv to json for clusters
Convert csv to json for cluster-d3.html(or dynamic-cluster.html)
|
#!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import json
import sys
import csv
row=[]
csvPath = sys.argv[1] #Input Path to csv file
with open(csvPath,"r") as f:
lines = csv.reader(f.read().splitlines(), delimiter=' ')
for line in lines:
row.append(line)
data={}
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
data[column[0]]=[]
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
second={}
second["name"]=column[1]+" "+column[2]
second["size"]=column[2]
data[column[0]].append(second)
clusterList = []
i=0
for elem in data.keys():
first={}
first["name"]="cluster "+str(i)
first["children"]=data[elem]
clusterList.append(first)
i+=1
print json.dumps(clusterList, sort_keys=True, indent=4, separators=(',', ': '))
clusterStruct = {"name":"clusters", "children":clusterList}
with open("clusters.json", "w") as f: #Pass the json file as input to cluster-d3.html or dynamic-cluster.html
f.write(json.dumps(clusterStruct, sort_keys=True, indent=4, separators=(',', ': ')))
|
<commit_before><commit_msg>Convert csv to json for clusters
Convert csv to json for cluster-d3.html(or dynamic-cluster.html)<commit_after>
|
#!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import json
import sys
import csv
row=[]
csvPath = sys.argv[1] #Input Path to csv file
with open(csvPath,"r") as f:
lines = csv.reader(f.read().splitlines(), delimiter=' ')
for line in lines:
row.append(line)
data={}
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
data[column[0]]=[]
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
second={}
second["name"]=column[1]+" "+column[2]
second["size"]=column[2]
data[column[0]].append(second)
clusterList = []
i=0
for elem in data.keys():
first={}
first["name"]="cluster "+str(i)
first["children"]=data[elem]
clusterList.append(first)
i+=1
print json.dumps(clusterList, sort_keys=True, indent=4, separators=(',', ': '))
clusterStruct = {"name":"clusters", "children":clusterList}
with open("clusters.json", "w") as f: #Pass the json file as input to cluster-d3.html or dynamic-cluster.html
f.write(json.dumps(clusterStruct, sort_keys=True, indent=4, separators=(',', ': ')))
|
Convert csv to json for clusters
Convert csv to json for cluster-d3.html(or dynamic-cluster.html)#!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import json
import sys
import csv
row=[]
csvPath = sys.argv[1] #Input Path to csv file
with open(csvPath,"r") as f:
lines = csv.reader(f.read().splitlines(), delimiter=' ')
for line in lines:
row.append(line)
data={}
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
data[column[0]]=[]
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
second={}
second["name"]=column[1]+" "+column[2]
second["size"]=column[2]
data[column[0]].append(second)
clusterList = []
i=0
for elem in data.keys():
first={}
first["name"]="cluster "+str(i)
first["children"]=data[elem]
clusterList.append(first)
i+=1
print json.dumps(clusterList, sort_keys=True, indent=4, separators=(',', ': '))
clusterStruct = {"name":"clusters", "children":clusterList}
with open("clusters.json", "w") as f: #Pass the json file as input to cluster-d3.html or dynamic-cluster.html
f.write(json.dumps(clusterStruct, sort_keys=True, indent=4, separators=(',', ': ')))
|
<commit_before><commit_msg>Convert csv to json for clusters
Convert csv to json for cluster-d3.html(or dynamic-cluster.html)<commit_after>#!/usr/bin/env python2.7
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import json
import sys
import csv
row=[]
csvPath = sys.argv[1] #Input Path to csv file
with open(csvPath,"r") as f:
lines = csv.reader(f.read().splitlines(), delimiter=' ')
for line in lines:
row.append(line)
data={}
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
data[column[0]]=[]
for i in range(len(row)):
if "x-coordinate" in row[i][0].split(","):
continue
else:
column = row[i][0].split(",")
second={}
second["name"]=column[1]+" "+column[2]
second["size"]=column[2]
data[column[0]].append(second)
clusterList = []
i=0
for elem in data.keys():
first={}
first["name"]="cluster "+str(i)
first["children"]=data[elem]
clusterList.append(first)
i+=1
print json.dumps(clusterList, sort_keys=True, indent=4, separators=(',', ': '))
clusterStruct = {"name":"clusters", "children":clusterList}
with open("clusters.json", "w") as f: #Pass the json file as input to cluster-d3.html or dynamic-cluster.html
f.write(json.dumps(clusterStruct, sort_keys=True, indent=4, separators=(',', ': ')))
|
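On one hypothetical input row the transformation looks like this; the column meanings are inferred from the "x-coordinate" header check, so treat the sample as illustrative only:

row = "0.42,doc1.pdf,0.97"  # hypothetical "x-coordinate,name,size" line
x, name, size = row.split(",")
child = {"name": name + " " + size, "size": size}
print({x: [child]})
# -> {'0.42': [{'name': 'doc1.pdf 0.97', 'size': '0.97'}]}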
|
6fb6829c6ac0f755b468393042f538c5073c76ff
|
tools/rename_by_timestamp/rename_by_timestamp.py
|
tools/rename_by_timestamp/rename_by_timestamp.py
|
import os, os.path
import shutil
from datetime import datetime
import sys
def do_folder(src_folder):
assert os.path.isdir(src_folder)
dst_folder = os.path.join(src_folder, 'timestamp_prefixed')
print 'Input dir: %s'%src_folder
print 'Output dir: %s'%dst_folder
if not os.path.exists(dst_folder):
print 'Creating %s' %dst_folder
os.makedirs(dst_folder)
for f in os.listdir(src_folder):
base,ext = os.path.splitext(f)
src_file = os.path.join(src_folder, f)
if os.path.isfile(src_file) and ext.lower() == '.png':
ctime = os.path.getctime(src_file)
dt = datetime.utcfromtimestamp(ctime)
dst_file = os.path.join(dst_folder, dt.strftime("%Y_%m_%d_")+f)
print '%s ---> %s' % (src_file,dst_file)
shutil.copyfile(src_file,dst_file)
if __name__ == '__main__':
folders = sys.argv[1:]
if len(folders) == 0:
do_folder('.')
else:
for f in folders:
do_folder(f)
|
Add script to create copies of PNG files w/ filenames prefixed with their creation time (Windows only)
|
Add script to create copies of PNG files w/ filenames prefixed with their creation time (Windows only)
|
Python
|
mit
|
joymachinegames/joymachine-public,joymachinegames/joymachine-public,joymachinegames/joymachine-public,joymachinegames/joymachine-public,joymachinegames/joymachine-public,joymachinegames/joymachine-public
|
Add script to create copies of PNG files w/ filenames prefixed with their creation time (Windows only)
|
import os, os.path
import shutil
from datetime import datetime
import sys
def do_folder(src_folder):
assert os.path.isdir(src_folder)
dst_folder = os.path.join(src_folder, 'timestamp_prefixed')
print 'Input dir: %s'%src_folder
print 'Output dir: %s'%dst_folder
if not os.path.exists(dst_folder):
print 'Creating %s' %dst_folder
os.makedirs(dst_folder)
for f in os.listdir(src_folder):
base,ext = os.path.splitext(f)
src_file = os.path.join(src_folder, f)
if os.path.isfile(src_file) and ext.lower() == '.png':
ctime = os.path.getctime(src_file)
dt = datetime.utcfromtimestamp(ctime)
dst_file = os.path.join(dst_folder, dt.strftime("%Y_%m_%d_")+f)
print '%s ---> %s' % (src_file,dst_file)
shutil.copyfile(src_file,dst_file)
if __name__ == '__main__':
folders = sys.argv[1:]
if len(folders) == 0:
do_folder('.')
else:
for f in folders:
do_folder(f)
|
<commit_before><commit_msg>Add script to create copies of PNG files w/ filenames prefixed with their creation time (Windows only)<commit_after>
|
import os, os.path
import shutil
from datetime import datetime
import sys
def do_folder(src_folder):
assert os.path.isdir(src_folder)
dst_folder = os.path.join(src_folder, 'timestamp_prefixed')
print 'Input dir: %s'%src_folder
print 'Output dir: %s'%dst_folder
if not os.path.exists(dst_folder):
print 'Creating %s' %dst_folder
os.makedirs(dst_folder)
for f in os.listdir(src_folder):
base,ext = os.path.splitext(f)
src_file = os.path.join(src_folder, f)
if os.path.isfile(src_file) and ext.lower() == '.png':
ctime = os.path.getctime(src_file)
dt = datetime.utcfromtimestamp(ctime)
dst_file = os.path.join(dst_folder, dt.strftime("%Y_%m_%d_")+f)
print '%s ---> %s' % (src_file,dst_file)
shutil.copyfile(src_file,dst_file)
if __name__ == '__main__':
folders = sys.argv[1:]
if len(folders) == 0:
do_folder('.')
else:
for f in folders:
do_folder(f)
|
Add script to create copies of PNG files w/ filenames prefixed with their creation time (Windows only)import os, os.path
import shutil
from datetime import datetime
import sys
def do_folder(src_folder):
assert os.path.isdir(src_folder)
dst_folder = os.path.join(src_folder, 'timestamp_prefixed')
print 'Input dir: %s'%src_folder
print 'Output dir: %s'%dst_folder
if not os.path.exists(dst_folder):
print 'Creating %s' %dst_folder
os.makedirs(dst_folder)
for f in os.listdir(src_folder):
base,ext = os.path.splitext(f)
src_file = os.path.join(src_folder, f)
if os.path.isfile(src_file) and ext.lower() == '.png':
ctime = os.path.getctime(src_file)
dt = datetime.utcfromtimestamp(ctime)
dst_file = os.path.join(dst_folder, dt.strftime("%Y_%m_%d_")+f)
print '%s ---> %s' % (src_file,dst_file)
shutil.copyfile(src_file,dst_file)
if __name__ == '__main__':
folders = sys.argv[1:]
if len(folders) == 0:
do_folder('.')
else:
for f in folders:
do_folder(f)
|
<commit_before><commit_msg>Add script to create copies of PNG files w/ filenames prefixed with their creation time (Windows only)<commit_after>import os, os.path
import shutil
from datetime import datetime
import sys
def do_folder(src_folder):
assert os.path.isdir(src_folder)
dst_folder = os.path.join(src_folder, 'timestamp_prefixed')
print 'Input dir: %s'%src_folder
print 'Output dir: %s'%dst_folder
if not os.path.exists(dst_folder):
print 'Creating %s' %dst_folder
os.makedirs(dst_folder)
for f in os.listdir(src_folder):
base,ext = os.path.splitext(f)
src_file = os.path.join(src_folder, f)
if os.path.isfile(src_file) and ext.lower() == '.png':
ctime = os.path.getctime(src_file)
dt = datetime.utcfromtimestamp(ctime)
dst_file = os.path.join(dst_folder, dt.strftime("%Y_%m_%d_")+f)
print '%s ---> %s' % (src_file,dst_file)
shutil.copyfile(src_file,dst_file)
if __name__ == '__main__':
folders = sys.argv[1:]
if len(folders) == 0:
do_folder('.')
else:
for f in folders:
do_folder(f)
|
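The prefix itself is just strftime over the file's creation time; in isolation (epoch value chosen arbitrarily):

from datetime import datetime

dt = datetime.utcfromtimestamp(1500000000)  # arbitrary timestamp
print(dt.strftime("%Y_%m_%d_") + "screenshot.png")
# -> 2017_07_14_screenshot.png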
|
1b971b32d57df48919e0c45372237d858996e3d9
|
tests/test_gmeu/test_a_C.py
|
tests/test_gmeu/test_a_C.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test GMEU entry 'a', part C.
This entry is about pronunciation of the word 'a' and is unlikely to be
relevant to usage in written language.
"""
|
Add test for GMEU entry "a", part C
|
Add test for GMEU entry "a", part C
|
Python
|
bsd-3-clause
|
amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint
|
Add test for GMEU entry "a", part C
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test GMEU entry 'a', part C.
This entry is about pronunciation of the word 'a' and is unlikely to be
relevant to usage in written language.
"""
|
<commit_before><commit_msg>Add test for GMEU entry "a", part C<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test GMEU entry 'a', part C.
This entry is about pronunciation of the word 'a' and is unlikely to be
relevant to usage in written language.
"""
|
Add test for GMEU entry "a", part C#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test GMEU entry 'a', part C.
This entry is about pronunciation of the word 'a' and is unlikely to be
relevant to usage in written language.
"""
|
<commit_before><commit_msg>Add test for GMEU entry "a", part C<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test GMEU entry 'a', part C.
This entry is about pronunciation of the word 'a' and is unlikely to be
relevant to usage in written language.
"""
|
|
cea54800ae2ac13b830984aa368125faf02ba429
|
examples/cli_object.py
|
examples/cli_object.py
|
import hug
@hug.cli_object(name='git', version='1.0.0')
class GIT(object):
"""An example of command like calls via an Object"""
@hug.cli_object()
def push(self, branch='master'):
return 'Pushing {}'.format(branch)
@hug.cli_object()
def pull(self, branch='master'):
return 'Pulling {}'.format(branch)
if __name__ == '__main__':
GIT.cli()
|
Add example for CLI object
|
Add example for CLI object
|
Python
|
mit
|
MuhammadAlkarouri/hug,timothycrosley/hug,MuhammadAlkarouri/hug,timothycrosley/hug,MuhammadAlkarouri/hug,timothycrosley/hug
|
Add example for CLI object
|
import hug
@hug.cli_object(name='git', version='1.0.0')
class GIT(object):
"""An example of command like calls via an Object"""
@hug.cli_object()
def push(self, branch='master'):
return 'Pushing {}'.format(branch)
@hug.cli_object()
def pull(self, branch='master'):
return 'Pulling {}'.format(branch)
if __name__ == '__main__':
GIT.cli()
|
<commit_before><commit_msg>Add example for CLI object<commit_after>
|
import hug
@hug.cli_object(name='git', version='1.0.0')
class GIT(object):
"""An example of command like calls via an Object"""
@hug.cli_object()
def push(self, branch='master'):
return 'Pushing {}'.format(branch)
@hug.cli_object()
def pull(self, branch='master'):
return 'Pulling {}'.format(branch)
if __name__ == '__main__':
GIT.cli()
|
Add example for CLI objectimport hug
@hug.cli_object(name='git', version='1.0.0')
class GIT(object):
"""An example of command like calls via an Object"""
@hug.cli_object()
def push(self, branch='master'):
return 'Pushing {}'.format(branch)
@hug.cli_object()
def pull(self, branch='master'):
return 'Pulling {}'.format(branch)
if __name__ == '__main__':
GIT.cli()
|
<commit_before><commit_msg>Add example for CLI object<commit_after>import hug
@hug.cli_object(name='git', version='1.0.0')
class GIT(object):
"""An example of command like calls via an Object"""
@hug.cli_object()
def push(self, branch='master'):
return 'Pushing {}'.format(branch)
@hug.cli_object()
def pull(self, branch='master'):
return 'Pulling {}'.format(branch)
if __name__ == '__main__':
GIT.cli()
|
|
fd48746e8d1d2d94a7453a4a976ffbf362711f63
|
tools/setbyte.py
|
tools/setbyte.py
|
#!/usr/bin/env python3
import sys
import ast
try:
file_name, location, new_byte = sys.argv[1:]
except:
print("Usage: ./{} file location newbyte".format(sys.argv[0]))
sys.exit(1)
addr = int(ast.literal_eval(location))
byte = int(ast.literal_eval(new_byte))
assert 0 <= addr
assert 0 <= byte < 2**8
with open(file_name, mode="br+") as f:
f.seek(addr)
f.write(bytes([byte]))
|
Add new tool to set bytes in file
|
Add new tool to set bytes in file
|
Python
|
mit
|
Dentosal/rust_os,Dentosal/rust_os,Dentosal/rust_os
|
Add new tool to set bytes in file
|
#!/usr/bin/env python3
import sys
import ast
try:
file_name, location, new_byte = sys.argv[1:]
except:
print("Usage: ./{} file location newbyte".format(sys.argv[0]))
sys.exit(1)
addr = int(ast.literal_eval(location))
byte = int(ast.literal_eval(new_byte))
assert 0 <= addr
assert 0 <= byte < 2**8
with open(file_name, mode="br+") as f:
f.seek(addr)
f.write(bytes([byte]))
|
<commit_before><commit_msg>Add new tool to set bytes in file<commit_after>
|
#!/usr/bin/env python3
import sys
import ast
try:
file_name, location, new_byte = sys.argv[1:]
except:
print("Usage: ./{} file location newbyte".format(sys.argv[0]))
sys.exit(1)
addr = int(ast.literal_eval(location))
byte = int(ast.literal_eval(new_byte))
assert 0 <= addr
assert 0 <= byte < 2**8
with open(file_name, mode="br+") as f:
f.seek(addr)
f.write(bytes([byte]))
|
Add new tool to set bytes in file#!/usr/bin/env python3
import sys
import ast
try:
file_name, location, new_byte = sys.argv[1:]
except:
print("Usage: ./{} file location newbyte".format(sys.argv[0]))
sys.exit(1)
addr = int(ast.literal_eval(location))
byte = int(ast.literal_eval(new_byte))
assert 0 <= addr
assert 0 <= byte < 2**8
with open(file_name, mode="br+") as f:
f.seek(addr)
f.write(bytes([byte]))
|
<commit_before><commit_msg>Add new tool to set bytes in file<commit_after>#!/usr/bin/env python3
import sys
import ast
try:
file_name, location, new_byte = sys.argv[1:]
except:
print("Usage: ./{} file location newbyte".format(sys.argv[0]))
sys.exit(1)
addr = int(ast.literal_eval(location))
byte = int(ast.literal_eval(new_byte))
assert 0 <= addr
assert 0 <= byte < 2**8
with open(file_name, mode="br+") as f:
f.seek(addr)
f.write(bytes([byte]))
|
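A hypothetical invocation: ast.literal_eval accepts both decimal and 0x-prefixed literals, so "./setbyte.py boot.bin 0x10 0xEB" reduces to the in-place patch below (file name and values are made up; "rb+" is the same mode as the "br+" spelling above):

with open("boot.bin", "rb+") as f:
    f.seek(0x10)            # byte offset 16
    f.write(bytes([0xEB]))  # overwrite exactly one byte in place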
|
47c4043dfecd95686b1520c93717fa4c512c4e74
|
tlsenum/hello_constructs.py
|
tlsenum/hello_constructs.py
|
from construct import Array, Bytes, Struct, UBInt16, UBInt32, UBInt8
ProtocolVersion = Struct(
"version",
UBInt8("major"),
UBInt8("minor")
)
Random = Struct(
"random",
UBInt32("gmt_unix_time"),
Bytes("random_bytes", 28)
)
SessionID = Struct(
"session_id",
UBInt8("length"),
Bytes("session_id", lambda ctx: ctx.length)
)
CipherSuites = Struct(
"cipher_suites",
UBInt16("length"),
Array(lambda ctx: ctx.length / 2, Bytes("cipher_suites", 2))
)
CompressionMethods = Struct(
"compression_methods",
UBInt8("length"),
Array(lambda ctx: ctx.length, UBInt8("compression_methods"))
)
ClientHello = Struct(
"ClientHello",
ProtocolVersion,
Random,
SessionID,
CipherSuites,
CompressionMethods,
UBInt16("extensions_length"),
Bytes("extensions_bytes", lambda ctx: ctx.extensions_length),
)
|
Add tentative construct format for ClientHello messages.
|
Add tentative construct format for ClientHello messages.
|
Python
|
mit
|
Ayrx/tlsenum,Ayrx/tlsenum
|
Add tentative construct format for ClientHello messages.
|
from construct import Array, Bytes, Struct, UBInt16, UBInt32, UBInt8
ProtocolVersion = Struct(
"version",
UBInt8("major"),
UBInt8("minor")
)
Random = Struct(
"random",
UBInt32("gmt_unix_time"),
Bytes("random_bytes", 28)
)
SessionID = Struct(
"session_id",
UBInt8("length"),
Bytes("session_id", lambda ctx: ctx.length)
)
CipherSuites = Struct(
"cipher_suites",
UBInt16("length"),
Array(lambda ctx: ctx.length / 2, Bytes("cipher_suites", 2))
)
CompressionMethods = Struct(
"compression_methods",
UBInt8("length"),
Array(lambda ctx: ctx.length, UBInt8("compression_methods"))
)
ClientHello = Struct(
"ClientHello",
ProtocolVersion,
Random,
SessionID,
CipherSuites,
CompressionMethods,
UBInt16("extensions_length"),
Bytes("extensions_bytes", lambda ctx: ctx.extensions_length),
)
|
<commit_before><commit_msg>Add tentative construct format for ClientHello messages.<commit_after>
|
from construct import Array, Bytes, Struct, UBInt16, UBInt32, UBInt8
ProtocolVersion = Struct(
"version",
UBInt8("major"),
UBInt8("minor")
)
Random = Struct(
"random",
UBInt32("gmt_unix_time"),
Bytes("random_bytes", 28)
)
SessionID = Struct(
"session_id",
UBInt8("length"),
Bytes("session_id", lambda ctx: ctx.length)
)
CipherSuites = Struct(
"cipher_suites",
UBInt16("length"),
Array(lambda ctx: ctx.length / 2, Bytes("cipher_suites", 2))
)
CompressionMethods = Struct(
"compression_methods",
UBInt8("length"),
Array(lambda ctx: ctx.length, UBInt8("compression_methods"))
)
ClientHello = Struct(
"ClientHello",
ProtocolVersion,
Random,
SessionID,
CipherSuites,
CompressionMethods,
UBInt16("extensions_length"),
Bytes("extensions_bytes", lambda ctx: ctx.extensions_length),
)
|
Add tentative construct format for ClientHello messages.from construct import Array, Bytes, Struct, UBInt16, UBInt32, UBInt8
ProtocolVersion = Struct(
"version",
UBInt8("major"),
UBInt8("minor")
)
Random = Struct(
"random",
UBInt32("gmt_unix_time"),
Bytes("random_bytes", 28)
)
SessionID = Struct(
"session_id",
UBInt8("length"),
Bytes("session_id", lambda ctx: ctx.length)
)
CipherSuites = Struct(
"cipher_suites",
UBInt16("length"),
Array(lambda ctx: ctx.length / 2, Bytes("cipher_suites", 2))
)
CompressionMethods = Struct(
"compression_methods",
UBInt8("length"),
Array(lambda ctx: ctx.length, UBInt8("compression_methods"))
)
ClientHello = Struct(
"ClientHello",
ProtocolVersion,
Random,
SessionID,
CipherSuites,
CompressionMethods,
UBInt16("extensions_length"),
Bytes("extensions_bytes", lambda ctx: ctx.extensions_length),
)
|
<commit_before><commit_msg>Add tentative construct format for ClientHello messages.<commit_after>from construct import Array, Bytes, Struct, UBInt16, UBInt32, UBInt8
ProtocolVersion = Struct(
"version",
UBInt8("major"),
UBInt8("minor")
)
Random = Struct(
"random",
UBInt32("gmt_unix_time"),
Bytes("random_bytes", 28)
)
SessionID = Struct(
"session_id",
UBInt8("length"),
Bytes("session_id", lambda ctx: ctx.length)
)
CipherSuites = Struct(
"cipher_suites",
UBInt16("length"),
Array(lambda ctx: ctx.length / 2, Bytes("cipher_suites", 2))
)
CompressionMethods = Struct(
"compression_methods",
UBInt8("length"),
Array(lambda ctx: ctx.length, UBInt8("compression_methods"))
)
ClientHello = Struct(
"ClientHello",
ProtocolVersion,
Random,
SessionID,
CipherSuites,
CompressionMethods,
UBInt16("extensions_length"),
Bytes("extensions_bytes", lambda ctx: ctx.extensions_length),
)
|
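A round-trip sketch for the smallest struct above, using the same legacy construct API and assuming ProtocolVersion is in scope; TLS 1.2 is version {3, 3} on the wire:

from construct import Container

raw = ProtocolVersion.build(Container(major=3, minor=3))
print(repr(raw))                   # '\x03\x03'
parsed = ProtocolVersion.parse(raw)
print(parsed.major, parsed.minor)  # 3 3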
|
5a12137c3e766451d83e6598ba93d16e0a8cbde8
|
tests/basics/subclass-native3.py
|
tests/basics/subclass-native3.py
|
class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
# TODO: Prints native base class name
#print(repr(e))
print(e.args)
|
Add test for accessing attribute of inherited native type.
|
tests: Add test for accessing attribute of inherited native type.
|
Python
|
mit
|
noahchense/micropython,matthewelse/micropython,micropython/micropython-esp32,Vogtinator/micropython,danicampora/micropython,adafruit/micropython,warner83/micropython,emfcamp/micropython,MrSurly/micropython-esp32,noahwilliamsson/micropython,mpalomer/micropython,kostyll/micropython,EcmaXp/micropython,drrk/micropython,MrSurly/micropython-esp32,Timmenem/micropython,lowRISC/micropython,hiway/micropython,chrisdearman/micropython,lbattraw/micropython,pozetroninc/micropython,ahotam/micropython,ernesto-g/micropython,lowRISC/micropython,TDAbboud/micropython,vitiral/micropython,paul-xxx/micropython,chrisdearman/micropython,JayanthyChengan/dataverse-placeholder,xhat/micropython,Timmenem/micropython,mpalomer/micropython,kostyll/micropython,EcmaXp/micropython,drrk/micropython,MrSurly/micropython-esp32,mhoffma/micropython,mianos/micropython,tobbad/micropython,dmazzella/micropython,jimkmc/micropython,dhylands/micropython,dxxb/micropython,jlillest/micropython,warner83/micropython,cwyark/micropython,kostyll/micropython,alex-robbins/micropython,AriZuu/micropython,mgyenik/micropython,skybird6672/micropython,danicampora/micropython,stonegithubs/micropython,SungEun-Steve-Kim/test-mp,blmorris/micropython,ruffy91/micropython,firstval/micropython,turbinenreiter/micropython,kostyll/micropython,skybird6672/micropython,tralamazza/micropython,methoxid/micropystat,pramasoul/micropython,neilh10/micropython,ryannathans/micropython,alex-robbins/micropython,chrisdearman/micropython,xyb/micropython,xyb/micropython,hosaka/micropython,ruffy91/micropython,vriera/micropython,pfalcon/micropython,jmarcelino/pycom-micropython,adamkh/micropython,redbear/micropython,KISSMonX/micropython,methoxid/micropystat,ericsnowcurrently/micropython,torwag/micropython,ganshun666/micropython,tdautc19841202/micropython,ernesto-g/micropython,dinau/micropython,oopy/micropython,dxxb/micropython,aethaniel/micropython,cnoviello/micropython,chrisdearman/micropython,warner83/micropython,jmarcelino/pycom-micropython,SHA2017-badge/micropython-esp32,dinau/micropython,praemdonck/micropython,dinau/micropython,deshipu/micropython,stonegithubs/micropython,redbear/micropython,dhylands/micropython,matthewelse/micropython,ahotam/micropython,pozetroninc/micropython,ceramos/micropython,deshipu/micropython,aethaniel/micropython,tralamazza/micropython,ganshun666/micropython,adafruit/circuitpython,supergis/micropython,MrSurly/micropython,matthewelse/micropython,warner83/micropython,Peetz0r/micropython-esp32,adafruit/circuitpython,adafruit/circuitpython,blmorris/micropython,dinau/micropython,cnoviello/micropython,selste/micropython,KISSMonX/micropython,paul-xxx/micropython,tuc-osg/micropython,jimkmc/micropython,tobbad/micropython,cloudformdesign/micropython,Peetz0r/micropython-esp32,mianos/micropython,neilh10/micropython,puuu/micropython,utopiaprince/micropython,oopy/micropython,AriZuu/micropython,omtinez/micropython,hosaka/micropython,mianos/micropython,toolmacher/micropython,tobbad/micropython,supergis/micropython,mgyenik/micropython,SHA2017-badge/micropython-esp32,vitiral/micropython,adamkh/micropython,SHA2017-badge/micropython-esp32,feilongfl/micropython,TDAbboud/micropython,TDAbboud/micropython,alex-march/micropython,torwag/micropython,pramasoul/micropython,vriera/micropython,cloudformdesign/micropython,firstval/micropython,bvernoux/micropython,methoxid/micropystat,vriera/micropython,MrSurly/micropython,turbinenreiter/micropython,AriZuu/micropython,ernesto-g/micropython,infinnovation/micropython,galenhz/micropython,micropython/micropython-esp32,mhoffma/micropython,feilongfl/micropython,Timmenem/micropython,EcmaXp/micropython,xuxiaoxin/micropython,xhat/micropython,henriknelson/micropython,ruffy91/micropython,xuxiaoxin/micropython,emfcamp/micropython,cwyark/micropython,galenhz/micropython,cnoviello/micropython,tobbad/micropython,emfcamp/micropython,noahwilliamsson/micropython,mgyenik/micropython,omtinez/micropython,warner83/micropython,jmarcelino/pycom-micropython,xyb/micropython,tdautc19841202/micropython,cwyark/micropython,pfalcon/micropython,methoxid/micropystat,pfalcon/micropython,dhylands/micropython,utopiaprince/micropython,TDAbboud/micropython,pozetroninc/micropython,micropython/micropython-esp32,lbattraw/micropython,hiway/micropython,jmarcelino/pycom-micropython,cloudformdesign/micropython,heisewangluo/micropython,redbear/micropython,ruffy91/micropython,ernesto-g/micropython,turbinenreiter/micropython,noahwilliamsson/micropython,cwyark/micropython,pramasoul/micropython,ryannathans/micropython,omtinez/micropython,ericsnowcurrently/micropython,toolmacher/micropython,toolmacher/micropython,AriZuu/micropython,MrSurly/micropython-esp32,oopy/micropython,skybird6672/micropython,vitiral/micropython,ChuckM/micropython,lowRISC/micropython,mpalomer/micropython,EcmaXp/micropython,ceramos/micropython,vitiral/micropython,heisewangluo/micropython,Vogtinator/micropython,rubencabrera/micropython,mianos/micropython,ganshun666/micropython,henriknelson/micropython,slzatz/micropython,ericsnowcurrently/micropython,orionrobots/micropython,utopiaprince/micropython,aethaniel/micropython,noahchense/micropython,mpalomer/micropython,torwag/micropython,puuu/micropython,HenrikSolver/micropython,kerneltask/micropython,noahwilliamsson/micropython,dmazzella/micropython,danicampora/micropython,adamkh/micropython,hosaka/micropython,cloudformdesign/micropython,bvernoux/micropython,trezor/micropython,selste/micropython,cwyark/micropython,adamkh/micropython,alex-robbins/micropython,ChuckM/micropython,dmazzella/micropython,deshipu/micropython,pfalcon/micropython,ChuckM/micropython,jimkmc/micropython,mianos/micropython,neilh10/micropython,praemdonck/micropython,PappaPeppar/micropython,praemdonck/micropython,blmorris/micropython,hiway/micropython,HenrikSolver/micropython,HenrikSolver/micropython,ceramos/micropython,mgyenik/micropython,hiway/micropython,martinribelotta/micropython,mhoffma/micropython,adafruit/circuitpython,vriera/micropython,adafruit/circuitpython,henriknelson/micropython,SungEun-Steve-Kim/test-mp,pozetroninc/micropython,cloudformdesign/micropython,torwag/micropython,PappaPeppar/micropython,puuu/micropython,redbear/micropython,skybird6672/micropython,adafruit/circuitpython,torwag/micropython,henriknelson/micropython,jlillest/micropython,heisewangluo/micropython,jimkmc/micropython,paul-xxx/micropython,infinnovation/micropython,PappaPeppar/micropython,firstval/micropython,omtinez/micropython,jimkmc/micropython,ryannathans/micropython,emfcamp/micropython,kerneltask/micropython,infinnovation/micropython,mhoffma/micropython,xuxiaoxin/micropython,martinribelotta/micropython,danicampora/micropython,SungEun-Steve-Kim/test-mp,dxxb/micropython,paul-xxx/micropython,dinau/micropython,aethaniel/micropython,ceramos/micropython,ernesto-g/micropython,misterdanb/micropython,PappaPeppar/micropython,blazewicz/micropython,galenhz/micropython,kostyll/micropython,adafruit/micropython,tobbad/micropython,tralamazza/micropython,xhat/micropython,swegener/micropython,henriknelson/micropython,misterdanb/micropython,omtinez/micropython,pozetroninc/micropython,ericsnowcurrently/micropython,noahchense/micropython,ceramos/micropython,heisewangluo/micropython,SHA2017-badge/micropython-esp32,stonegithubs/micropython,drrk/micropython,tuc-osg/micropython,danicampora/micropython,tdautc19841202/micropython,deshipu/micropython,lbattraw/micropython,alex-march/micropython,suda/micropython,SungEun-Steve-Kim/test-mp,orionrobots/micropython,jlillest/micropython,Timmenem/micropython,misterdanb/micropython,AriZuu/micropython,alex-robbins/micropython,hiway/micropython,Vogtinator/micropython,lbattraw/micropython,trezor/micropython,supergis/micropython,selste/micropython,xuxiaoxin/micropython,ahotam/micropython,dxxb/micropython,blazewicz/micropython,kostyll/micropython,ericsnowcurrently/micropython,swegener/micropython,matthewelse/micropython,xyb/micropython,dhylands/micropython,matthewelse/micropython,ahotam/micropython,drrk/micropython,ryannathans/micropython,HenrikSolver/micropython,kerneltask/micropython,martinribelotta/micropython,slzatz/micropython,rubencabrera/micropython,adafruit/micropython,slzatz/micropython,methoxid/micropystat,dmazzella/micropython,suda/micropython,emfcamp/micropython,paul-xxx/micropython,ahotam/micropython,Vogtinator/micropython,puuu/micropython,vitiral/micropython,MrSurly/micropython,selste/micropython,slzatz/micropython,mhoffma/micropython,kerneltask/micropython,ruffy91/micropython,orionrobots/micropython,Vogtinator/micropython,firstval/micropython,redbear/micropython,kerneltask/micropython,hosaka/micropython
|
tests: Add test for accessing attribute of inherited native type.
|
class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
# TODO: Prints native base class name
#print(repr(e))
print(e.args)
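# A minimal companion sketch (MyExc2 and its attributes are illustrative,
# not part of the test above): attributes set on the subclass instance
# coexist with the native base's args tuple.
class MyExc2(Exception):
    def __init__(self, code, msg):
        super().__init__(code, msg)
        self.code = code  # extra attribute stored on the subclass instance
err = MyExc2(404, "Not found")
print(err.code)  # 404
print(err.args)  # (404, 'Not found')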
|
<commit_before><commit_msg>tests: Add test for accessing attribute of inherited native type.<commit_after>
|
class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
# TODO: Prints native base class name
#print(repr(e))
print(e.args)
|
tests: Add test for accessing attribute of inherited native type.class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
# TODO: Prints native base class name
#print(repr(e))
print(e.args)
|
<commit_before><commit_msg>tests: Add test for accessing attribute of inherited native type.<commit_after>class MyExc(Exception):
pass
e = MyExc(100, "Some error")
print(e)
# TODO: Prints native base class name
#print(repr(e))
print(e.args)
|
|
c963310123765baddf638c8d08b8fdb2f73b6ba6
|
tests/basics/subclass-native4.py
|
tests/basics/subclass-native4.py
|
# Test calling non-special method inherited from native type
class mylist(list):
pass
l = mylist([1, 2, 3])
print(l)
l.append(10)
print(l)
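# Companion sketch (assumes only standard list semantics): other inherited
# native methods work the same way on the subclass instance.
l2 = mylist("cab")
l2.sort()        # inherited list.sort
l2.extend("xy")  # inherited list.extend
print(l2)        # ['a', 'b', 'c', 'x', 'y']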
|
Add test for calling inherited native method on subclass.
|
tests: Add test for calling inherited native method on subclass.
|
Python
|
mit
|
drrk/micropython,ryannathans/micropython,MrSurly/micropython-esp32,warner83/micropython,redbear/micropython,noahchense/micropython,AriZuu/micropython,martinribelotta/micropython,toolmacher/micropython,martinribelotta/micropython,jlillest/micropython,oopy/micropython,feilongfl/micropython,mhoffma/micropython,vitiral/micropython,matthewelse/micropython,heisewangluo/micropython,xyb/micropython,KISSMonX/micropython,neilh10/micropython,kerneltask/micropython,tobbad/micropython,ahotam/micropython,suda/micropython,selste/micropython,noahchense/micropython,blazewicz/micropython,kostyll/micropython,ceramos/micropython,emfcamp/micropython,drrk/micropython,tralamazza/micropython,vitiral/micropython,ruffy91/micropython,PappaPeppar/micropython,alex-robbins/micropython,henriknelson/micropython,micropython/micropython-esp32,SungEun-Steve-Kim/test-mp,mianos/micropython,omtinez/micropython,Timmenem/micropython,pramasoul/micropython,pfalcon/micropython,neilh10/micropython,ryannathans/micropython,EcmaXp/micropython,swegener/micropython,supergis/micropython,jmarcelino/pycom-micropython,misterdanb/micropython,swegener/micropython,mpalomer/micropython,cloudformdesign/micropython,Peetz0r/micropython-esp32,stonegithubs/micropython,rubencabrera/micropython,heisewangluo/micropython,cloudformdesign/micropython,supergis/micropython,lbattraw/micropython,neilh10/micropython,Peetz0r/micropython-esp32,aethaniel/micropython,skybird6672/micropython,tdautc19841202/micropython,vitiral/micropython,dxxb/micropython,kostyll/micropython,adamkh/micropython,KISSMonX/micropython,martinribelotta/micropython,pozetroninc/micropython,alex-march/micropython,rubencabrera/micropython,SungEun-Steve-Kim/test-mp,tobbad/micropython,rubencabrera/micropython,vitiral/micropython,cloudformdesign/micropython,hosaka/micropython,turbinenreiter/micropython,cwyark/micropython,adafruit/circuitpython,skybird6672/micropython,ericsnowcurrently/micropython,bvernoux/micropython,slzatz/micropython,xhat/micropython,firstval/micropython,martinribelotta/micropython,paul-xxx/micropython,xhat/micropython,stonegithubs/micropython,alex-march/micropython,dxxb/micropython,lowRISC/micropython,alex-march/micropython,Vogtinator/micropython,ceramos/micropython,skybird6672/micropython,ChuckM/micropython,pfalcon/micropython,micropython/micropython-esp32,mpalomer/micropython,infinnovation/micropython,methoxid/micropystat,KISSMonX/micropython,TDAbboud/micropython,drrk/micropython,ericsnowcurrently/micropython,ryannathans/micropython,cnoviello/micropython,EcmaXp/micropython,hosaka/micropython,mpalomer/micropython,omtinez/micropython,dmazzella/micropython,Vogtinator/micropython,blmorris/micropython,trezor/micropython,bvernoux/micropython,matthewelse/micropython,alex-robbins/micropython,xuxiaoxin/micropython,chrisdearman/micropython,xuxiaoxin/micropython,EcmaXp/micropython,methoxid/micropystat,selste/micropython,SungEun-Steve-Kim/test-mp,praemdonck/micropython,tralamazza/micropython,HenrikSolver/micropython,ruffy91/micropython,hosaka/micropython,vitiral/micropython,kostyll/micropython,utopiaprince/micropython,xuxiaoxin/micropython,firstval/micropython,Timmenem/micropython,tralamazza/micropython,misterdanb/micropython,MrSurly/micropython-esp32,misterdanb/micropython,suda/micropython,dxxb/micropython,tuc-osg/micropython,chrisdearman/micropython,lbattraw/micropython,noahchense/micropython,PappaPeppar/micropython,oopy/micropython,ruffy91/micropython,noahwilliamsson/micropython,adamkh/micropython,SungEun-Steve-Kim/test-mp,deshipu/micropython,galenhz/micropython,turbinenreiter/micropy
thon,skybird6672/micropython,matthewelse/micropython,dinau/micropython,danicampora/micropython,hiway/micropython,utopiaprince/micropython,supergis/micropython,ericsnowcurrently/micropython,drrk/micropython,ChuckM/micropython,ernesto-g/micropython,warner83/micropython,warner83/micropython,galenhz/micropython,galenhz/micropython,cwyark/micropython,mpalomer/micropython,slzatz/micropython,oopy/micropython,adamkh/micropython,micropython/micropython-esp32,xhat/micropython,EcmaXp/micropython,mhoffma/micropython,xhat/micropython,KISSMonX/micropython,blmorris/micropython,heisewangluo/micropython,mianos/micropython,aethaniel/micropython,HenrikSolver/micropython,TDAbboud/micropython,MrSurly/micropython-esp32,adamkh/micropython,adafruit/micropython,cnoviello/micropython,galenhz/micropython,alex-robbins/micropython,chrisdearman/micropython,ganshun666/micropython,misterdanb/micropython,ganshun666/micropython,cwyark/micropython,pramasoul/micropython,tuc-osg/micropython,xyb/micropython,TDAbboud/micropython,kerneltask/micropython,redbear/micropython,AriZuu/micropython,infinnovation/micropython,ahotam/micropython,MrSurly/micropython,firstval/micropython,SHA2017-badge/micropython-esp32,infinnovation/micropython,feilongfl/micropython,hosaka/micropython,Vogtinator/micropython,mhoffma/micropython,orionrobots/micropython,MrSurly/micropython-esp32,warner83/micropython,Peetz0r/micropython-esp32,jimkmc/micropython,xuxiaoxin/micropython,adafruit/circuitpython,firstval/micropython,blmorris/micropython,aethaniel/micropython,supergis/micropython,hiway/micropython,utopiaprince/micropython,jlillest/micropython,alex-march/micropython,methoxid/micropystat,blmorris/micropython,AriZuu/micropython,swegener/micropython,deshipu/micropython,puuu/micropython,alex-robbins/micropython,micropython/micropython-esp32,MrSurly/micropython,dhylands/micropython,lowRISC/micropython,tobbad/micropython,lowRISC/micropython,ganshun666/micropython,torwag/micropython,oopy/micropython,jimkmc/micropython,turbinenreiter/micropython,xhat/micropython,jlillest/micropython,slzatz/micropython,kerneltask/micropython,EcmaXp/micropython,adafruit/micropython,praemdonck/micropython,dinau/micropython,toolmacher/micropython,lowRISC/micropython,torwag/micropython,toolmacher/micropython,MrSurly/micropython,martinribelotta/micropython,PappaPeppar/micropython,mianos/micropython,orionrobots/micropython,firstval/micropython,chrisdearman/micropython,chrisdearman/micropython,TDAbboud/micropython,micropython/micropython-esp32,puuu/micropython,slzatz/micropython,mgyenik/micropython,alex-robbins/micropython,MrSurly/micropython-esp32,tuc-osg/micropython,ernesto-g/micropython,danicampora/micropython,ceramos/micropython,HenrikSolver/micropython,tdautc19841202/micropython,dxxb/micropython,Peetz0r/micropython-esp32,tralamazza/micropython,danicampora/micropython,ruffy91/micropython,torwag/micropython,orionrobots/micropython,praemdonck/micropython,ahotam/micropython,jmarcelino/pycom-micropython,lbattraw/micropython,bvernoux/micropython,ChuckM/micropython,vriera/micropython,blazewicz/micropython,dhylands/micropython,jmarcelino/pycom-micropython,deshipu/micropython,adafruit/circuitpython,hosaka/micropython,blazewicz/micropython,suda/micropython,emfcamp/micropython,dmazzella/micropython,emfcamp/micropython,cnoviello/micropython,oopy/micropython,cwyark/micropython,paul-xxx/micropython,swegener/micropython,mianos/micropython,SHA2017-badge/micropython-esp32,Timmenem/micropython,mgyenik/micropython,tdautc19841202/micropython,lbattraw/micropython,SHA2017-badge/micropython-esp32,tuc-osg/
micropython,kerneltask/micropython,paul-xxx/micropython,bvernoux/micropython,xyb/micropython,swegener/micropython,danicampora/micropython,mpalomer/micropython,pramasoul/micropython,vriera/micropython,adafruit/circuitpython,puuu/micropython,neilh10/micropython,danicampora/micropython,TDAbboud/micropython,pramasoul/micropython,paul-xxx/micropython,deshipu/micropython,pfalcon/micropython,jimkmc/micropython,jimkmc/micropython,HenrikSolver/micropython,ryannathans/micropython,SHA2017-badge/micropython-esp32,henriknelson/micropython,ernesto-g/micropython,tdautc19841202/micropython,kostyll/micropython,dmazzella/micropython,Vogtinator/micropython,cloudformdesign/micropython,adafruit/micropython,lowRISC/micropython,ChuckM/micropython,cwyark/micropython,jimkmc/micropython,stonegithubs/micropython,tdautc19841202/micropython,dhylands/micropython,heisewangluo/micropython,tuc-osg/micropython,jmarcelino/pycom-micropython,matthewelse/micropython,warner83/micropython,infinnovation/micropython,blazewicz/micropython,puuu/micropython,mgyenik/micropython,turbinenreiter/micropython,selste/micropython,suda/micropython,omtinez/micropython,deshipu/micropython,bvernoux/micropython,supergis/micropython,xyb/micropython,omtinez/micropython,adafruit/micropython,vriera/micropython,Vogtinator/micropython,matthewelse/micropython,redbear/micropython,cloudformdesign/micropython,blmorris/micropython,cnoviello/micropython,dinau/micropython,henriknelson/micropython,trezor/micropython,feilongfl/micropython,omtinez/micropython,methoxid/micropystat,pfalcon/micropython,noahwilliamsson/micropython,ahotam/micropython,noahwilliamsson/micropython,PappaPeppar/micropython,tobbad/micropython,Peetz0r/micropython-esp32,mhoffma/micropython,selste/micropython,jlillest/micropython,mgyenik/micropython,infinnovation/micropython,henriknelson/micropython,toolmacher/micropython,dhylands/micropython,jlillest/micropython,selste/micropython,mgyenik/micropython,trezor/micropython,torwag/micropython,emfcamp/micropython,emfcamp/micropython,torwag/micropython,mianos/micropython,ericsnowcurrently/micropython,dinau/micropython,Timmenem/micropython,jmarcelino/pycom-micropython,drrk/micropython,trezor/micropython,rubencabrera/micropython,feilongfl/micropython,galenhz/micropython,matthewelse/micropython,hiway/micropython,misterdanb/micropython,henriknelson/micropython,stonegithubs/micropython,stonegithubs/micropython,pozetroninc/micropython,xyb/micropython,dinau/micropython,pozetroninc/micropython,MrSurly/micropython,vriera/micropython,praemdonck/micropython,hiway/micropython,lbattraw/micropython,trezor/micropython,utopiaprince/micropython,tobbad/micropython,kostyll/micropython,paul-xxx/micropython,slzatz/micropython,puuu/micropython,hiway/micropython,xuxiaoxin/micropython,alex-march/micropython,dhylands/micropython,PappaPeppar/micropython,orionrobots/micropython,adafruit/micropython,dxxb/micropython,cnoviello/micropython,skybird6672/micropython,pozetroninc/micropython,redbear/micropython,aethaniel/micropython,ganshun666/micropython,blazewicz/micropython,noahchense/micropython,toolmacher/micropython,ericsnowcurrently/micropython,aethaniel/micropython,vriera/micropython,utopiaprince/micropython,redbear/micropython,ruffy91/micropython,turbinenreiter/micropython,suda/micropython,adafruit/circuitpython,noahwilliamsson/micropython,heisewangluo/micropython,dmazzella/micropython,AriZuu/micropython,AriZuu/micropython,adamkh/micropython,feilongfl/micropython,MrSurly/micropython,SungEun-Steve-Kim/test-mp,ernesto-g/micropython,pfalcon/micropython,noahchense/micropython,no
ahwilliamsson/micropython,ryannathans/micropython,methoxid/micropystat,ChuckM/micropython,neilh10/micropython,Timmenem/micropython,pramasoul/micropython,HenrikSolver/micropython,SHA2017-badge/micropython-esp32,pozetroninc/micropython,adafruit/circuitpython,ganshun666/micropython,KISSMonX/micropython,ahotam/micropython,kerneltask/micropython,praemdonck/micropython,orionrobots/micropython,rubencabrera/micropython,ceramos/micropython,mhoffma/micropython,ceramos/micropython,ernesto-g/micropython
|
tests: Add test for calling inherited native method on subclass.
|
# Test calling non-special method inherited from native type
class mylist(list):
pass
l = mylist([1, 2, 3])
print(l)
l.append(10)
print(l)
|
<commit_before><commit_msg>tests: Add test for calling inherited native method on subclass.<commit_after>
|
# Test calling non-special method inherited from native type
class mylist(list):
pass
l = mylist([1, 2, 3])
print(l)
l.append(10)
print(l)
|
tests: Add test for calling inherited native method on subclass.# Test calling non-special method inherited from native type
class mylist(list):
pass
l = mylist([1, 2, 3])
print(l)
l.append(10)
print(l)
|
<commit_before><commit_msg>tests: Add test for calling inherited native method on subclass.<commit_after># Test calling non-special method inherited from native type
class mylist(list):
pass
l = mylist([1, 2, 3])
print(l)
l.append(10)
print(l)
|
|
15a655e490b0f0d035edd4f772f126146959e531
|
examples/plot_dom_hits.py
|
examples/plot_dom_hits.py
|
"""
==================
DOM hits.
==================
This example shows how to create DOM hit statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
class MuonFilter(kp.Module):
"""Write all muons from MCTracks to Muons."""
def process(self, blob):
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
        for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(MuonFilter)
pipe.attach(DOMHits)
pipe.drain()
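# Hedged aside: pld3 used above computes the perpendicular distance from a
# point to a line; a minimal NumPy equivalent (argument names are assumptions)
# would be:
#     import numpy as np
#     def point_line_distance(point, line_point, line_dir):
#         u = line_dir / np.linalg.norm(line_dir)   # unit direction vector
#         return np.linalg.norm(np.cross(point - line_point, u))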
|
Add example to show how to create dom hit statistics
|
Add example to show how to create dom hit statistics
|
Python
|
mit
|
tamasgal/km3pipe,tamasgal/km3pipe
|
Add example to show how to create dom hit statistics
|
"""
==================
DOM hits.
==================
This example shows how to create DOM hit statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
class MuonFilter(kp.Module):
"""Write all muons from MCTracks to Muons."""
def process(self, blob):
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
        for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(MuonFilter)
pipe.attach(DOMHits)
pipe.drain()
|
<commit_before><commit_msg>Add example to show how to create dom hit statistics<commit_after>
|
"""
==================
DOM hits.
==================
This example shows how to create DOM hit statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
class MuonFilter(kp.Module):
"""Write all muons from MCTracks to Muons."""
def process(self, blob):
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
        for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(MuonFilter)
pipe.attach(DOMHits)
pipe.drain()
|
Add example to show how to create dom hit statistics"""
==================
DOM hits.
==================
This example shows how to create DOM hit statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
class MuonFilter(kp.Module):
"""Write all muons from MCTracks to Muons."""
def process(self, blob):
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
        for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(MuonFilter)
pipe.attach(DOMHits)
pipe.drain()
|
<commit_before><commit_msg>Add example to show how to create dom hit statistics<commit_after>"""
==================
DOM hits.
==================
This example shows how to create DOM hit statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
class MuonFilter(kp.Module):
"""Write all muons from MCTracks to Muons."""
def process(self, blob):
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
        for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(MuonFilter)
pipe.attach(DOMHits)
pipe.drain()
|
|
b3cbf179371c289121126d0cb66d9873d740f202
|
utils/helpers.py
|
utils/helpers.py
|
from django.http import HttpResponseNotAllowed
def post_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])
return func(request, *args, **kwargs)
return decorated
def get_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
return func(request, *args, **kwargs)
return decorated
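# Usage sketch (the view name is hypothetical): a GET request to a @post_only
# view now receives "405 Method Not Allowed" with an "Allow: POST" header.
#     @post_only
#     def submit_comment(request):
#         ...
# Design note: wrapping `decorated` with functools.wraps(func) would also
# preserve the view's __name__ and docstring.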
|
Add post_only and get_only decorators
|
Add post_only and get_only decorators
|
Python
|
apache-2.0
|
Nikola-K/django_reddit,Nikola-K/django_reddit,Nikola-K/django_reddit
|
Add post_only and get_only decorators
|
from django.http import HttpResponseNotAllowed
def post_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])
return func(request, *args, **kwargs)
return decorated
def get_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
return func(request, *args, **kwargs)
return decorated
|
<commit_before><commit_msg>Add post_only and get_only decorators<commit_after>
|
from django.http import HttpResponseNotAllowed
def post_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])
return func(request, *args, **kwargs)
return decorated
def get_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
return func(request, *args, **kwargs)
return decorated
|
Add post_only and get_only decoratorsfrom django.http import HttpResponseNotAllowed
def post_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])
return func(request, *args, **kwargs)
return decorated
def get_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
return func(request, *args, **kwargs)
return decorated
|
<commit_before><commit_msg>Add post_only and get_only decorators<commit_after>from django.http import HttpResponseNotAllowed
def post_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])
return func(request, *args, **kwargs)
return decorated
def get_only(func):
def decorated(request, *args, **kwargs):
if request.method != 'GET':
            return HttpResponseNotAllowed(['GET'])
return func(request, *args, **kwargs)
return decorated
|
|
c5723be490b5baf953de71a6af778fe5663c70ed
|
scripts/slave/recipe_modules/raw_io/test_api.py
|
scripts/slave/recipe_modules/raw_io/test_api.py
|
from slave import recipe_test_api
class RawIOTestApi(recipe_test_api.RecipeTestApi): # pragma: no cover
@recipe_test_api.placeholder_step_data
@staticmethod
def output(data, retcode=None):
return data, retcode
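# Usage sketch inside a recipe's GenTests (the test and step names are
# illustrative assumptions, not part of this module):
#     yield (api.test('basic') +
#            api.step_data('read version', api.raw_io.output('1.2.3')))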
|
Add the API available inside GenTests method for the raw_io module.
|
Add the API available inside GenTests method for the raw_io module.
This CL makes the raw_io module ready to have its output mocked
in GenTests.
R=agable@chromium.org
Review URL: https://codereview.chromium.org/160143003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@250773 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
eunchong/build,eunchong/build,eunchong/build,eunchong/build
|
Add the API available inside GenTests method for the raw_io module.
This CL makes the raw_io module ready to have its output mocked
in GenTests.
R=agable@chromium.org
Review URL: https://codereview.chromium.org/160143003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@250773 0039d316-1c4b-4281-b951-d872f2087c98
|
from slave import recipe_test_api
class RawIOTestApi(recipe_test_api.RecipeTestApi): # pragma: no cover
@recipe_test_api.placeholder_step_data
@staticmethod
def output(data, retcode=None):
return data, retcode
|
<commit_before><commit_msg>Add the API available inside GenTests method for the raw_io module.
This CL makes the raw_io module ready to have its output mocked
in GenTests.
R=agable@chromium.org
Review URL: https://codereview.chromium.org/160143003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@250773 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
from slave import recipe_test_api
class RawIOTestApi(recipe_test_api.RecipeTestApi): # pragma: no cover
@recipe_test_api.placeholder_step_data
@staticmethod
def output(data, retcode=None):
return data, retcode
|
Add the API available inside GenTests method for the raw_io module.
This CL makes the raw_io module ready to have its output mocked
in GenTests.
R=agable@chromium.org
Review URL: https://codereview.chromium.org/160143003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@250773 0039d316-1c4b-4281-b951-d872f2087c98from slave import recipe_test_api
class RawIOTestApi(recipe_test_api.RecipeTestApi): # pragma: no cover
@recipe_test_api.placeholder_step_data
@staticmethod
def output(data, retcode=None):
return data, retcode
|
<commit_before><commit_msg>Add the API available inside GenTests method for the raw_io module.
This CL makes the raw_io module ready to have its output mocked
in GenTests.
R=agable@chromium.org
Review URL: https://codereview.chromium.org/160143003
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@250773 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>from slave import recipe_test_api
class RawIOTestApi(recipe_test_api.RecipeTestApi): # pragma: no cover
@recipe_test_api.placeholder_step_data
@staticmethod
def output(data, retcode=None):
return data, retcode
|
|
f93a801a7951f2ee1a59b536608e13928aa60102
|
yithlibraryserver/scripts/tests/test_createdb.py
|
yithlibraryserver/scripts/tests/test_createdb.py
|
# Yith Library Server is a password storage server.
# Copyright (C) 2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import sys
from yithlibraryserver.compat import StringIO
from yithlibraryserver.scripts.createdb import createdb
from yithlibraryserver.scripts.testing import ScriptTests
class BuildAssetsTests(ScriptTests):
use_db = False
def setUp(self):
super(BuildAssetsTests, self).setUp()
# Save sys values
self.old_args = sys.argv[:]
self.old_stdout = sys.stdout
def tearDown(self):
# Restore sys.values
sys.argv = self.old_args
sys.stdout = self.old_stdout
super(BuildAssetsTests, self).tearDown()
def test_no_arguments(self):
# Replace sys argv and stdout
sys.argv = []
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, 2)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, 'You must provide at least one argument\n')
def test_normal_usage(self):
sys.argv = ['notused', self.conf_file_path]
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, None)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, '')
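    # Aside: on Python 3 the stdout capture above could also be written with
    # contextlib.redirect_stdout instead of swapping sys.stdout by hand, e.g.
    #     import contextlib, io
    #     buf = io.StringIO()
    #     with contextlib.redirect_stdout(buf):
    #         result = createdb()
    # The explicit setUp/tearDown swap used here keeps Python 2 compatibility.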
|
Add simple test for the createdb script
|
Add simple test for the createdb script
|
Python
|
agpl-3.0
|
lorenzogil/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server
|
Add simple test for the createdb script
|
# Yith Library Server is a password storage server.
# Copyright (C) 2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import sys
from yithlibraryserver.compat import StringIO
from yithlibraryserver.scripts.createdb import createdb
from yithlibraryserver.scripts.testing import ScriptTests
class BuildAssetsTests(ScriptTests):
use_db = False
def setUp(self):
super(BuildAssetsTests, self).setUp()
# Save sys values
self.old_args = sys.argv[:]
self.old_stdout = sys.stdout
def tearDown(self):
# Restore sys.values
sys.argv = self.old_args
sys.stdout = self.old_stdout
super(BuildAssetsTests, self).tearDown()
def test_no_arguments(self):
# Replace sys argv and stdout
sys.argv = []
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, 2)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, 'You must provide at least one argument\n')
def test_normal_usage(self):
sys.argv = ['notused', self.conf_file_path]
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, None)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, '')
|
<commit_before><commit_msg>Add simple test for the createdb script<commit_after>
|
# Yith Library Server is a password storage server.
# Copyright (C) 2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import sys
from yithlibraryserver.compat import StringIO
from yithlibraryserver.scripts.createdb import createdb
from yithlibraryserver.scripts.testing import ScriptTests
class BuildAssetsTests(ScriptTests):
use_db = False
def setUp(self):
super(BuildAssetsTests, self).setUp()
# Save sys values
self.old_args = sys.argv[:]
self.old_stdout = sys.stdout
def tearDown(self):
# Restore sys.values
sys.argv = self.old_args
sys.stdout = self.old_stdout
super(BuildAssetsTests, self).tearDown()
def test_no_arguments(self):
# Replace sys argv and stdout
sys.argv = []
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, 2)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, 'You must provide at least one argument\n')
def test_normal_usage(self):
sys.argv = ['notused', self.conf_file_path]
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, None)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, '')
|
Add simple test for the createdb script# Yith Library Server is a password storage server.
# Copyright (C) 2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import sys
from yithlibraryserver.compat import StringIO
from yithlibraryserver.scripts.createdb import createdb
from yithlibraryserver.scripts.testing import ScriptTests
class BuildAssetsTests(ScriptTests):
use_db = False
def setUp(self):
super(BuildAssetsTests, self).setUp()
# Save sys values
self.old_args = sys.argv[:]
self.old_stdout = sys.stdout
def tearDown(self):
# Restore sys.values
sys.argv = self.old_args
sys.stdout = self.old_stdout
super(BuildAssetsTests, self).tearDown()
def test_no_arguments(self):
# Replace sys argv and stdout
sys.argv = []
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, 2)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, 'You must provide at least one argument\n')
def test_normal_usage(self):
sys.argv = ['notused', self.conf_file_path]
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, None)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, '')
|
<commit_before><commit_msg>Add simple test for the createdb script<commit_after># Yith Library Server is a password storage server.
# Copyright (C) 2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import sys
from yithlibraryserver.compat import StringIO
from yithlibraryserver.scripts.createdb import createdb
from yithlibraryserver.scripts.testing import ScriptTests
class BuildAssetsTests(ScriptTests):
use_db = False
def setUp(self):
super(BuildAssetsTests, self).setUp()
# Save sys values
self.old_args = sys.argv[:]
self.old_stdout = sys.stdout
def tearDown(self):
# Restore sys.values
sys.argv = self.old_args
sys.stdout = self.old_stdout
super(BuildAssetsTests, self).tearDown()
def test_no_arguments(self):
# Replace sys argv and stdout
sys.argv = []
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, 2)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, 'You must provide at least one argument\n')
def test_normal_usage(self):
sys.argv = ['notused', self.conf_file_path]
sys.stdout = StringIO()
result = createdb()
self.assertEqual(result, None)
stdout = sys.stdout.getvalue()
self.assertEqual(stdout, '')
|
|
836f23046a25edbdeafcbe487fa9f918a9bae5cb
|
Sketches/JT/Jam/application/trunk/setup_py2exe.py
|
Sketches/JT/Jam/application/trunk/setup_py2exe.py
|
#!/usr/bin/env python
#
# (C) 2008 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
from distutils.core import setup
import py2exe
setup(name = "Kamaelia-Jam",
version = "0.1a1",
description = "Kamaelia Jam - a multi-user networked music sequencer",
author = "Joe Turner & Kamaelia Contributors",
author_email = "ms_@users.sourceforge.net",
url = "http://kamaelia.sourceforge.net/KamaeliaJam",
license = "Copyright (c)2008 BBC & Kamaelia Contributors, All Rights Reserved. Use allowed under MPL 1.1, GPL 2.0, LGPL 2.1",
windows=['jam']
)
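# Usage note (standard py2exe workflow; assumes 'jam' is the launcher script
# that sits next to this file):
#     python setup_py2exe.py py2exe
# which writes the executable plus its supporting DLLs into the dist\ folder.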
|
Add py2exe setup file for creating windows executables
|
Add py2exe setup file for creating windows executables
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Add py2exe setup file for creating windows executables
|
#!/usr/bin/env python
#
# (C) 2008 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
from distutils.core import setup
import py2exe
setup(name = "Kamaelia-Jam",
version = "0.1a1",
description = "Kamaelia Jam - a multi-user networked music sequencer",
author = "Joe Turner & Kamaelia Contributors",
author_email = "ms_@users.sourceforge.net",
url = "http://kamaelia.sourceforge.net/KamaeliaJam",
license = "Copyright (c)2008 BBC & Kamaelia Contributors, All Rights Reserved. Use allowed under MPL 1.1, GPL 2.0, LGPL 2.1",
windows=['jam']
)
|
<commit_before><commit_msg>Add py2exe setup file for creating windows executables<commit_after>
|
#!/usr/bin/env python
#
# (C) 2008 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
from distutils.core import setup
import py2exe
setup(name = "Kamaelia-Jam",
version = "0.1a1",
description = "Kamaelia Jam - a multi-user networked music sequencer",
author = "Joe Turner & Kamaelia Contributors",
author_email = "ms_@users.sourceforge.net",
url = "http://kamaelia.sourceforge.net/KamaeliaJam",
license = "Copyright (c)2008 BBC & Kamaelia Contributors, All Rights Reserved. Use allowed under MPL 1.1, GPL 2.0, LGPL 2.1",
windows=['jam']
)
|
Add py2exe setup file for creating windows executables#!/usr/bin/env python
#
# (C) 2008 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
from distutils.core import setup
import py2exe
setup(name = "Kamaelia-Jam",
version = "0.1a1",
description = "Kamaelia Jam - a multi-user networked music sequencer",
author = "Joe Turner & Kamaelia Contributors",
author_email = "ms_@users.sourceforge.net",
url = "http://kamaelia.sourceforge.net/KamaeliaJam",
license = "Copyright (c)2008 BBC & Kamaelia Contributors, All Rights Reserved. Use allowed under MPL 1.1, GPL 2.0, LGPL 2.1",
windows=['jam']
)
|
<commit_before><commit_msg>Add py2exe setup file for creating windows executables<commit_after>#!/usr/bin/env python
#
# (C) 2008 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
from distutils.core import setup
import py2exe
setup(name = "Kamaelia-Jam",
version = "0.1a1",
description = "Kamaelia Jam - a multi-user networked music sequencer",
author = "Joe Turner & Kamaelia Contributors",
author_email = "ms_@users.sourceforge.net",
url = "http://kamaelia.sourceforge.net/KamaeliaJam",
license = "Copyright (c)2008 BBC & Kamaelia Contributors, All Rights Reserved. Use allowed under MPL 1.1, GPL 2.0, LGPL 2.1",
windows=['jam']
)
|
|
fecbaf30cadcf02b44f920116edea3c5de94ba4e
|
crackingcointsolutions/chapter4/exercisethree.py
|
crackingcointsolutions/chapter4/exercisethree.py
|
'''
Created on 30 Aug 2017
@author: igoroya
'''
import collections
from chapter4 import utils
def make_lists(root_node):
stack = collections.deque()
node_i = 1
lists = []
node = root_node
stack.append(node)
while len(stack) > 0:
        node = stack.popleft()  # FIFO pop gives a level-order traversal, so node_i is the index calc_level expects
        add_list(lists, node_i, node.name)
        node_i += 1
        if node.left is not None:
            stack.append(node.left)
        if node.right is not None:
            stack.append(node.right)
return lists
def add_list(lists, node_i, name):
get_level = calc_level(node_i)
level = get_level()
if len(lists) < level:
lists.append([name])
else:
lists[level - 1].append(name)
def calc_level(node_i):  # returns a closure that searches for the level of the node with level-order index node_i
    level = 1
    def work():
        nonlocal level
        while True:
            div = (pow(2, level) - 1) // node_i
if div == 1:
return level
elif div < 1:
level += 1
else:
print("Something went wrong")
return None # Should be an exception
return work
if __name__ == '__main__':
root_node = utils.BinaryTreeNode("A") # this is the root node
root_node.left = utils.BinaryTreeNode("B")
root_node.right = utils.BinaryTreeNode("C")
node = root_node.left
node.left = utils.BinaryTreeNode("D")
node.right = utils.BinaryTreeNode("E")
node = root_node.right
node.left = utils.BinaryTreeNode("F")
node.right = utils.BinaryTreeNode("G")
node = node.left
node.left = utils.BinaryTreeNode("H")
node.right = utils.BinaryTreeNode("I")
lists = make_lists(root_node)
print(lists)
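    # For comparison, a recursion-based sketch (it assumes only the same
    # BinaryTreeNode attributes used above) that tracks the depth explicitly
    # and therefore needs no level-order index arithmetic:
    def make_lists_recursive(node, level=1, lists=None):
        if lists is None:
            lists = []
        if node is None:
            return lists
        if len(lists) < level:
            lists.append([])
        lists[level - 1].append(node.name)
        make_lists_recursive(node.left, level + 1, lists)
        make_lists_recursive(node.right, level + 1, lists)
        return lists
    print(make_lists_recursive(root_node))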
|
Add solution for exercise 4.3
|
Add solution for exercise 4.3
|
Python
|
mit
|
igoroya/igor-oya-solutions-cracking-coding-interview
|
Add solution for exercise 4.3
|
'''
Created on 30 Aug 2017
@author: igoroya
'''
import collections
from chapter4 import utils
def make_lists(root_node):
stack = collections.deque()
node_i = 1
lists = []
node = root_node
stack.append(node)
while len(stack) > 0:
        node = stack.popleft()  # FIFO pop gives a level-order traversal, so node_i is the index calc_level expects
        add_list(lists, node_i, node.name)
        node_i += 1
        if node.left is not None:
            stack.append(node.left)
        if node.right is not None:
            stack.append(node.right)
return lists
def add_list(lists, node_i, name):
get_level = calc_level(node_i)
level = get_level()
if len(lists) < level:
lists.append([name])
else:
lists[level - 1].append(name)
def calc_level(node_i):  # returns a closure that searches for the level of the node with level-order index node_i
    level = 1
    def work():
        nonlocal level
        while True:
            div = (pow(2, level) - 1) // node_i
if div == 1:
return level
elif div < 1:
level += 1
else:
print("Something went wrong")
return None # Should be an exception
return work
if __name__ == '__main__':
root_node = utils.BinaryTreeNode("A") # this is the root node
root_node.left = utils.BinaryTreeNode("B")
root_node.right = utils.BinaryTreeNode("C")
node = root_node.left
node.left = utils.BinaryTreeNode("D")
node.right = utils.BinaryTreeNode("E")
node = root_node.right
node.left = utils.BinaryTreeNode("F")
node.right = utils.BinaryTreeNode("G")
node = node.left
node.left = utils.BinaryTreeNode("H")
node.right = utils.BinaryTreeNode("I")
lists = make_lists(root_node)
print(lists)
|
<commit_before><commit_msg>Add solution for exercise 4.3<commit_after>
|
'''
Created on 30 Aug 2017
@author: igoroya
'''
import collections
from chapter4 import utils
def make_lists(root_node):
stack = collections.deque()
node_i = 1
lists = []
node = root_node
stack.append(node)
while len(stack) > 0:
        node = stack.popleft()  # FIFO pop gives a level-order traversal, so node_i is the index calc_level expects
        add_list(lists, node_i, node.name)
        node_i += 1
        if node.left is not None:
            stack.append(node.left)
        if node.right is not None:
            stack.append(node.right)
return lists
def add_list(lists, node_i, name):
get_level = calc_level(node_i)
level = get_level()
if len(lists) < level:
lists.append([name])
else:
lists[level - 1].append(name)
def calc_level(node_i):  # returns a closure that searches for the level of the node with level-order index node_i
    level = 1
    def work():
        nonlocal level
        while True:
            div = (pow(2, level) - 1) // node_i
if div == 1:
return level
elif div < 1:
level += 1
else:
print("Something went wrong")
return None # Should be an exception
return work
if __name__ == '__main__':
root_node = utils.BinaryTreeNode("A") # this is the root node
root_node.left = utils.BinaryTreeNode("B")
root_node.right = utils.BinaryTreeNode("C")
node = root_node.left
node.left = utils.BinaryTreeNode("D")
node.right = utils.BinaryTreeNode("E")
node = root_node.right
node.left = utils.BinaryTreeNode("F")
node.right = utils.BinaryTreeNode("G")
node = node.left
node.left = utils.BinaryTreeNode("H")
node.right = utils.BinaryTreeNode("I")
lists = make_lists(root_node)
print(lists)
|
Add solution for exercise 4.3'''
Created on 30 Aug 2017
@author: igoroya
'''
import collections
from chapter4 import utils
def make_lists(root_node):
stack = collections.deque()
node_i = 1
lists = []
node = root_node
stack.append(node)
while len(stack) > 0:
        node = stack.popleft()  # FIFO pop gives a level-order traversal, so node_i is the index calc_level expects
        add_list(lists, node_i, node.name)
        node_i += 1
        if node.left is not None:
            stack.append(node.left)
        if node.right is not None:
            stack.append(node.right)
return lists
def add_list(lists, node_i, name):
get_level = calc_level(node_i)
level = get_level()
if len(lists) < level:
lists.append([name])
else:
lists[level - 1].append(name)
def calc_level(node_i):  # returns a closure that searches for the level of the node with level-order index node_i
    level = 1
    def work():
        nonlocal level
        while True:
            div = (pow(2, level) - 1) // node_i
if div == 1:
return level
elif div < 1:
level += 1
else:
print("Something went wrong")
return None # Should be an exception
return work
if __name__ == '__main__':
root_node = utils.BinaryTreeNode("A") # this is the root node
root_node.left = utils.BinaryTreeNode("B")
root_node.right = utils.BinaryTreeNode("C")
node = root_node.left
node.left = utils.BinaryTreeNode("D")
node.right = utils.BinaryTreeNode("E")
node = root_node.right
node.left = utils.BinaryTreeNode("F")
node.right = utils.BinaryTreeNode("G")
node = node.left
node.left = utils.BinaryTreeNode("H")
node.right = utils.BinaryTreeNode("I")
lists = make_lists(root_node)
print(lists)
|
<commit_before><commit_msg>Add solution for exercise 4.3<commit_after>'''
Created on 30 Aug 2017
@author: igoroya
'''
import collections
from chapter4 import utils
def make_lists(root_node):
stack = collections.deque()
node_i = 1
lists = []
node = root_node
stack.append(node)
while len(stack) > 0:
        node = stack.popleft()  # FIFO pop gives a level-order traversal, so node_i is the index calc_level expects
        add_list(lists, node_i, node.name)
        node_i += 1
        if node.left is not None:
            stack.append(node.left)
        if node.right is not None:
            stack.append(node.right)
return lists
def add_list(lists, node_i, name):
get_level = calc_level(node_i)
level = get_level()
if len(lists) < level:
lists.append([name])
else:
lists[level - 1].append(name)
def calc_level(node_i):  # returns a closure that searches for the level of the node with level-order index node_i
    level = 1
    def work():
        nonlocal level
        while True:
            div = (pow(2, level) - 1) // node_i
if div == 1:
return level
elif div < 1:
level += 1
else:
print("Something went wrong")
return None # Should be an exception
return work
if __name__ == '__main__':
root_node = utils.BinaryTreeNode("A") # this is the root node
root_node.left = utils.BinaryTreeNode("B")
root_node.right = utils.BinaryTreeNode("C")
node = root_node.left
node.left = utils.BinaryTreeNode("D")
node.right = utils.BinaryTreeNode("E")
node = root_node.right
node.left = utils.BinaryTreeNode("F")
node.right = utils.BinaryTreeNode("G")
node = node.left
node.left = utils.BinaryTreeNode("H")
node.right = utils.BinaryTreeNode("I")
lists = make_lists(root_node)
print(lists)
|
|
fad392fd67aa858f92659ab94ca1c190898d4951
|
add_csp_header.py
|
add_csp_header.py
|
# Burp extension to add CSP headers to responses
__author__ = 'jay.kelath'
# setup Imports
from burp import IBurpExtender
from burp import IHttpListener
from burp import IHttpRequestResponse
from burp import IResponseInfo
# Class BurpExtender (Required) containing all functions used to interact with the Burp Suite API
class BurpExtender(IBurpExtender, IHttpListener):
# define registerExtenderCallbacks: From IBurpExtender Interface
def registerExtenderCallbacks(self, callbacks):
# keep a reference to our callbacks object (Burp Extensibility Feature)
self._callbacks = callbacks
# obtain an extension helpers object (Burp Extensibility Feature)
# http://portswigger.net/burp/extender/api/burp/IExtensionHelpers.html
self._helpers = callbacks.getHelpers()
# set our extension name that will display in Extender Tab
self._callbacks.setExtensionName("Add a CSP header")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# define processHttpMessage: From IHttpListener Interface
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# determine if request or response:
        if not messageIsRequest:  # only handle responses
            response = messageInfo.getResponse()  # get Response from IHttpRequestResponse instance
            responseStr = self._callbacks.getHelpers().bytesToString(response)
            responseParsed = self._helpers.analyzeResponse(response)
            body = responseStr[responseParsed.getBodyOffset():]
            headers = responseParsed.getHeaders()
            headers.add('MYHEADER: CSP')  # add CSP header here
httpResponse = self._callbacks.getHelpers().buildHttpMessage(headers, body)
messageInfo.setResponse(httpResponse)
return
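# Hedged example: to emit a real policy instead of the MYHEADER placeholder
# above, the added line could read, for instance:
#     headers.add("Content-Security-Policy: default-src 'self'")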
|
Add a header to burp response
|
Add a header to burp response
|
Python
|
mit
|
kelath/Burp-Extensions
|
Add a header to burp response
|
# Burp extension to add CSP headers to responses
__author__ = 'jay.kelath'
# setup Imports
from burp import IBurpExtender
from burp import IHttpListener
from burp import IHttpRequestResponse
from burp import IResponseInfo
# Class BurpExtender (Required) containing all functions used to interact with the Burp Suite API
class BurpExtender(IBurpExtender, IHttpListener):
# define registerExtenderCallbacks: From IBurpExtender Interface
def registerExtenderCallbacks(self, callbacks):
# keep a reference to our callbacks object (Burp Extensibility Feature)
self._callbacks = callbacks
# obtain an extension helpers object (Burp Extensibility Feature)
# http://portswigger.net/burp/extender/api/burp/IExtensionHelpers.html
self._helpers = callbacks.getHelpers()
# set our extension name that will display in Extender Tab
self._callbacks.setExtensionName("Add a CSP header")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# define processHttpMessage: From IHttpListener Interface
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# determine if request or response:
if not messageIsRequest:#only handle responses
response = messageInfo.getResponse() #get Response from IHttpRequestResponse instance
responseStr = self._callbacks.getHelpers().bytesToString(response)
responseParsed = self._helpers.analyzeResponse(response)
body = responseStr[responseParsed.getBodyOffset():]
headers = responseParsed.getHeaders()
            headers.add('MYHEADER: CSP') # placeholder header -- swap in a real Content-Security-Policy line here
httpResponse = self._callbacks.getHelpers().buildHttpMessage(headers, body)
messageInfo.setResponse(httpResponse)
return
|
<commit_before><commit_msg>Add a header to burp response<commit_after>
|
# Burp extension to add CSP headers to responses
__author__ = 'jay.kelath'
# setup Imports
from burp import IBurpExtender
from burp import IHttpListener
from burp import IHttpRequestResponse
from burp import IResponseInfo
# Class BurpExtender (Required) containing all functions used to interact with Burp Suite API
class BurpExtender(IBurpExtender, IHttpListener):
# define registerExtenderCallbacks: From IBurpExtender Interface
def registerExtenderCallbacks(self, callbacks):
# keep a reference to our callbacks object (Burp Extensibility Feature)
self._callbacks = callbacks
# obtain an extension helpers object (Burp Extensibility Feature)
# http://portswigger.net/burp/extender/api/burp/IExtensionHelpers.html
self._helpers = callbacks.getHelpers()
# set our extension name that will display in Extender Tab
self._callbacks.setExtensionName("Add a CSP header")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# define processHttpMessage: From IHttpListener Interface
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# determine if request or response:
if not messageIsRequest:#only handle responses
response = messageInfo.getResponse() #get Response from IHttpRequestResponse instance
responseStr = self._callbacks.getHelpers().bytesToString(response)
responseParsed = self._helpers.analyzeResponse(response)
body = responseStr[responseParsed.getBodyOffset():]
headers = responseParsed.getHeaders()
            headers.add('MYHEADER: CSP') # placeholder header -- swap in a real Content-Security-Policy line here
httpResponse = self._callbacks.getHelpers().buildHttpMessage(headers, body)
messageInfo.setResponse(httpResponse)
return
|
Add a header to burp response# Burp extension to add CSP headers to responses
__author__ = 'jay.kelath'
# setup Imports
from burp import IBurpExtender
from burp import IHttpListener
from burp import IHttpRequestResponse
from burp import IResponseInfo
# Class BurpExtender (Required) containing all functions used to interact with Burp Suite API
class BurpExtender(IBurpExtender, IHttpListener):
# define registerExtenderCallbacks: From IBurpExtender Interface
def registerExtenderCallbacks(self, callbacks):
# keep a reference to our callbacks object (Burp Extensibility Feature)
self._callbacks = callbacks
# obtain an extension helpers object (Burp Extensibility Feature)
# http://portswigger.net/burp/extender/api/burp/IExtensionHelpers.html
self._helpers = callbacks.getHelpers()
# set our extension name that will display in Extender Tab
self._callbacks.setExtensionName("Add a CSP header")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# define processHttpMessage: From IHttpListener Interface
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# determine if request or response:
if not messageIsRequest:#only handle responses
response = messageInfo.getResponse() #get Response from IHttpRequestResponse instance
responseStr = self._callbacks.getHelpers().bytesToString(response)
responseParsed = self._helpers.analyzeResponse(response)
body = responseStr[responseParsed.getBodyOffset():]
headers = responseParsed.getHeaders()
            headers.add('MYHEADER: CSP') # placeholder header -- swap in a real Content-Security-Policy line here
httpResponse = self._callbacks.getHelpers().buildHttpMessage(headers, body)
messageInfo.setResponse(httpResponse)
return
|
<commit_before><commit_msg>Add a header to burp response<commit_after># Burp extension to add CSP headers to responses
__author__ = 'jay.kelath'
# setup Imports
from burp import IBurpExtender
from burp import IHttpListener
from burp import IHttpRequestResponse
from burp import IResponseInfo
# Class BurpExtender (Required) containing all functions used to interact with Burp Suite API
class BurpExtender(IBurpExtender, IHttpListener):
# define registerExtenderCallbacks: From IBurpExtender Interface
def registerExtenderCallbacks(self, callbacks):
# keep a reference to our callbacks object (Burp Extensibility Feature)
self._callbacks = callbacks
# obtain an extension helpers object (Burp Extensibility Feature)
# http://portswigger.net/burp/extender/api/burp/IExtensionHelpers.html
self._helpers = callbacks.getHelpers()
# set our extension name that will display in Extender Tab
self._callbacks.setExtensionName("Add a CSP header")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# define processHttpMessage: From IHttpListener Interface
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# determine if request or response:
if not messageIsRequest:#only handle responses
response = messageInfo.getResponse() #get Response from IHttpRequestResponse instance
responseStr = self._callbacks.getHelpers().bytesToString(response)
responseParsed = self._helpers.analyzeResponse(response)
body = responseStr[responseParsed.getBodyOffset():]
headers = responseParsed.getHeaders()
            headers.add('MYHEADER: CSP') # placeholder header -- swap in a real Content-Security-Policy line here
httpResponse = self._callbacks.getHelpers().buildHttpMessage(headers, body)
messageInfo.setResponse(httpResponse)
return
|
|
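The extension above writes a placeholder header (MYHEADER: CSP). A minimal sketch for emitting a real policy instead -- the policy string is illustrative, not from the commit, and inside Burp the header container is a Java List, so the append below becomes headers.add(...):
CSP_VALUE = "default-src 'self'; script-src 'self'; object-src 'none'"  # illustrative policy
def with_csp(headers):
    # Append a Content-Security-Policy line unless the server already set one,
    # so the extension does not stack duplicate policies.
    if not any(h.lower().startswith("content-security-policy:") for h in headers):
        headers.append("Content-Security-Policy: " + CSP_VALUE)
    return headers
# Quick check outside Burp:
print(with_csp(["HTTP/1.1 200 OK", "Content-Type: text/html"]))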
e142d7ec83d9b5896a741a84ff5148f300254d89
|
test/util/test_cubic_interpolation.py
|
test/util/test_cubic_interpolation.py
|
import torch
from gpytorch.utils.interpolation import Interpolation
from gpytorch import utils
def test_interpolation():
x = torch.linspace(0.01, 1, 100)
grid = torch.linspace(-0.05, 1.05, 50)
J, C = Interpolation().interpolate(grid, x)
W = utils.index_coef_to_sparse(J, C, len(grid))
test_func_grid = grid.pow(2)
test_func_x = x.pow(2)
interp_func_x = torch.dsmm(W, test_func_grid.unsqueeze(1)).squeeze()
assert all(torch.abs(interp_func_x - test_func_x) / (test_func_x + 1e-10) < 1e-5)
|
Add basic unit test for cubic interpolation
|
Add basic unit test for cubic interpolation
|
Python
|
mit
|
jrg365/gpytorch,jrg365/gpytorch,jrg365/gpytorch
|
Add basic unit test for cubic interpolation
|
import torch
from gpytorch.utils.interpolation import Interpolation
from gpytorch import utils
def test_interpolation():
x = torch.linspace(0.01, 1, 100)
grid = torch.linspace(-0.05, 1.05, 50)
J, C = Interpolation().interpolate(grid, x)
W = utils.index_coef_to_sparse(J, C, len(grid))
test_func_grid = grid.pow(2)
test_func_x = x.pow(2)
interp_func_x = torch.dsmm(W, test_func_grid.unsqueeze(1)).squeeze()
assert all(torch.abs(interp_func_x - test_func_x) / (test_func_x + 1e-10) < 1e-5)
|
<commit_before><commit_msg>Add basic unit test for cubic interpolation<commit_after>
|
import torch
from gpytorch.utils.interpolation import Interpolation
from gpytorch import utils
def test_interpolation():
x = torch.linspace(0.01, 1, 100)
grid = torch.linspace(-0.05, 1.05, 50)
J, C = Interpolation().interpolate(grid, x)
W = utils.index_coef_to_sparse(J, C, len(grid))
test_func_grid = grid.pow(2)
test_func_x = x.pow(2)
interp_func_x = torch.dsmm(W, test_func_grid.unsqueeze(1)).squeeze()
assert all(torch.abs(interp_func_x - test_func_x) / (test_func_x + 1e-10) < 1e-5)
|
Add basic unit test for cubic interpolationimport torch
from gpytorch.utils.interpolation import Interpolation
from gpytorch import utils
def test_interpolation():
x = torch.linspace(0.01, 1, 100)
grid = torch.linspace(-0.05, 1.05, 50)
J, C = Interpolation().interpolate(grid, x)
W = utils.index_coef_to_sparse(J, C, len(grid))
test_func_grid = grid.pow(2)
test_func_x = x.pow(2)
interp_func_x = torch.dsmm(W, test_func_grid.unsqueeze(1)).squeeze()
assert all(torch.abs(interp_func_x - test_func_x) / (test_func_x + 1e-10) < 1e-5)
|
<commit_before><commit_msg>Add basic unit test for cubic interpolation<commit_after>import torch
from gpytorch.utils.interpolation import Interpolation
from gpytorch import utils
def test_interpolation():
x = torch.linspace(0.01, 1, 100)
grid = torch.linspace(-0.05, 1.05, 50)
J, C = Interpolation().interpolate(grid, x)
W = utils.index_coef_to_sparse(J, C, len(grid))
test_func_grid = grid.pow(2)
test_func_x = x.pow(2)
interp_func_x = torch.dsmm(W, test_func_grid.unsqueeze(1)).squeeze()
assert all(torch.abs(interp_func_x - test_func_x) / (test_func_x + 1e-10) < 1e-5)
|
|
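A cheap companion invariant for the test above: if the cubic scheme is meant to reproduce constants (an assumption about its design), every row of W should sum to 1, so interpolating the all-ones vector must return ones. Same imports and tolerance style as the existing test:
def test_weights_reproduce_constants():
    x = torch.linspace(0.01, 1, 100)
    grid = torch.linspace(-0.05, 1.05, 50)
    J, C = Interpolation().interpolate(grid, x)
    W = utils.index_coef_to_sparse(J, C, len(grid))
    # W @ ones == ones everywhere iff each row of interpolation weights sums to 1.
    row_sums = torch.dsmm(W, torch.ones(len(grid)).unsqueeze(1)).squeeze()
    assert all(torch.abs(row_sums - 1) < 1e-5)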
014096616d8263ea96b5b8a2b7926b498f02b425
|
setupdefaults.py
|
setupdefaults.py
|
#! /usr/bin/python
# Encoding: utf-8
import json
import os
import re
import subprocess
import sys
class TypeMapping:
def __init__(self, py_types, type_string, value_transformer):
self.py_types = py_types
self.type_string = type_string
self.value_transformer = value_transformer
def map_type(value):
# Unsupported property list types: data, date, array, array-add, dict, dict-add
# bool must be checked before int because it is a subtype of int
mappings = [
TypeMapping([str, unicode], "string", lambda val: val),
TypeMapping([bool], "bool", lambda val: "TRUE" if val else "FALSE"),
TypeMapping([int, long], "int", lambda val: str(val)),
TypeMapping([float], "float", lambda val: str(val)),
]
for mapping in mappings:
for py_type in mapping.py_types:
if isinstance(value, py_type):
return mapping.type_string, mapping.value_transformer(value)
return None, None
def write_defaults_item(domain, key, value):
type, string_value = map_type(value)
if type is None:
print "Skipping unsupported pair: " + key + str(value)
return False
subprocess.call(["defaults", "write", domain, key, "-" + type, string_value])
return True
def write_defaults(domain, patch_file):
success_count = 0
for key, value in json.load(patch_file).iteritems():
if write_defaults_item(domain, key, value):
success_count += 1
print "Wrote " + str(success_count) + " defaults for " + domain
def setup_defaults(data_directory):
for file_name in os.listdir(data_directory):
# Skip hidden files
if file_name.startswith("."):
continue
# File name is expected to be the user defaults domain, with an optional .json extension.
domain = re.sub("\.json$", "", file_name)
with open(os.path.join(data_directory, file_name), "r") as opened_file:
write_defaults(domain, opened_file)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Too few arguments. Usage: %s <data directory>' % sys.argv[0])
setup_defaults(sys.argv[1])
|
Add script for writing defaults from a directory of patch dictionaries.
|
Add script for writing defaults from a directory of patch dictionaries.
|
Python
|
mit
|
douglashill/OS-X-setup,douglashill/OS-X-setup
|
Add script for writing defaults from a directory of patch dictionaries.
|
#! /usr/bin/python
# Encoding: utf-8
import json
import os
import re
import subprocess
import sys
class TypeMapping:
def __init__(self, py_types, type_string, value_transformer):
self.py_types = py_types
self.type_string = type_string
self.value_transformer = value_transformer
def map_type(value):
# Unsupported property list types: data, date, array, array-add, dict, dict-add
# bool must be checked before int because it is a subtype of int
mappings = [
TypeMapping([str, unicode], "string", lambda val: val),
TypeMapping([bool], "bool", lambda val: "TRUE" if val else "FALSE"),
TypeMapping([int, long], "int", lambda val: str(val)),
TypeMapping([float], "float", lambda val: str(val)),
]
for mapping in mappings:
for py_type in mapping.py_types:
if isinstance(value, py_type):
return mapping.type_string, mapping.value_transformer(value)
return None, None
def write_defaults_item(domain, key, value):
type, string_value = map_type(value)
if type is None:
print "Skipping unsupported pair: " + key + str(value)
return False
subprocess.call(["defaults", "write", domain, key, "-" + type, string_value])
return True
def write_defaults(domain, patch_file):
success_count = 0
for key, value in json.load(patch_file).iteritems():
if write_defaults_item(domain, key, value):
success_count += 1
print "Wrote " + str(success_count) + " defaults for " + domain
def setup_defaults(data_directory):
for file_name in os.listdir(data_directory):
# Skip hidden files
if file_name.startswith("."):
continue
# File name is expected to be the user defaults domain, with an optional .json extension.
domain = re.sub("\.json$", "", file_name)
with open(os.path.join(data_directory, file_name), "r") as opened_file:
write_defaults(domain, opened_file)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Too few arguments. Usage: %s <data directory>' % sys.argv[0])
setup_defaults(sys.argv[1])
|
<commit_before><commit_msg>Add script for writing defaults from a directory of patch dictionaries.<commit_after>
|
#! /usr/bin/python
# Encoding: utf-8
import json
import os
import re
import subprocess
import sys
class TypeMapping:
def __init__(self, py_types, type_string, value_transformer):
self.py_types = py_types
self.type_string = type_string
self.value_transformer = value_transformer
def map_type(value):
# Unsupported property list types: data, date, array, array-add, dict, dict-add
# bool must be checked before int because it is a subtype of int
mappings = [
TypeMapping([str, unicode], "string", lambda val: val),
TypeMapping([bool], "bool", lambda val: "TRUE" if val else "FALSE"),
TypeMapping([int, long], "int", lambda val: str(val)),
TypeMapping([float], "float", lambda val: str(val)),
]
for mapping in mappings:
for py_type in mapping.py_types:
if isinstance(value, py_type):
return mapping.type_string, mapping.value_transformer(value)
return None, None
def write_defaults_item(domain, key, value):
type, string_value = map_type(value)
if type is None:
print "Skipping unsupported pair: " + key + str(value)
return False
subprocess.call(["defaults", "write", domain, key, "-" + type, string_value])
return True
def write_defaults(domain, patch_file):
success_count = 0
for key, value in json.load(patch_file).iteritems():
if write_defaults_item(domain, key, value):
success_count += 1
print "Wrote " + str(success_count) + " defaults for " + domain
def setup_defaults(data_directory):
for file_name in os.listdir(data_directory):
# Skip hidden files
if file_name.startswith("."):
continue
# File name is expected to be the user defaults domain, with an optional .json extension.
domain = re.sub("\.json$", "", file_name)
with open(os.path.join(data_directory, file_name), "r") as opened_file:
write_defaults(domain, opened_file)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Too few arguments. Usage: %s <data directory>' % sys.argv[0])
setup_defaults(sys.argv[1])
|
Add script for writing defaults from a directory of patch dictionaries.#! /usr/bin/python
# Encoding: utf-8
import json
import os
import re
import subprocess
import sys
class TypeMapping:
def __init__(self, py_types, type_string, value_transformer):
self.py_types = py_types
self.type_string = type_string
self.value_transformer = value_transformer
def map_type(value):
# Unsupported property list types: data, date, array, array-add, dict, dict-add
# bool must be checked before int because it is a subtype of int
mappings = [
TypeMapping([str, unicode], "string", lambda val: val),
TypeMapping([bool], "bool", lambda val: "TRUE" if val else "FALSE"),
TypeMapping([int, long], "int", lambda val: str(val)),
TypeMapping([float], "float", lambda val: str(val)),
]
for mapping in mappings:
for py_type in mapping.py_types:
if isinstance(value, py_type):
return mapping.type_string, mapping.value_transformer(value)
return None, None
def write_defaults_item(domain, key, value):
type, string_value = map_type(value)
if type is None:
print "Skipping unsupported pair: " + key + str(value)
return False
subprocess.call(["defaults", "write", domain, key, "-" + type, string_value])
return True
def write_defaults(domain, patch_file):
success_count = 0
for key, value in json.load(patch_file).iteritems():
if write_defaults_item(domain, key, value):
success_count += 1
print "Wrote " + str(success_count) + " defaults for " + domain
def setup_defaults(data_directory):
for file_name in os.listdir(data_directory):
# Skip hidden files
if file_name.startswith("."):
continue
# File name is expected to be the user defaults domain, with an optional .json extension.
domain = re.sub("\.json$", "", file_name)
with open(os.path.join(data_directory, file_name), "r") as opened_file:
write_defaults(domain, opened_file)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Too few arguments. Usage: %s <data directory>' % sys.argv[0])
setup_defaults(sys.argv[1])
|
<commit_before><commit_msg>Add script for writing defaults from a directory of patch dictionaries.<commit_after>#! /usr/bin/python
# Encoding: utf-8
import json
import os
import re
import subprocess
import sys
class TypeMapping:
def __init__(self, py_types, type_string, value_transformer):
self.py_types = py_types
self.type_string = type_string
self.value_transformer = value_transformer
def map_type(value):
# Unsupported property list types: data, date, array, array-add, dict, dict-add
# bool must be checked before int because it is a subtype of int
mappings = [
TypeMapping([str, unicode], "string", lambda val: val),
TypeMapping([bool], "bool", lambda val: "TRUE" if val else "FALSE"),
TypeMapping([int, long], "int", lambda val: str(val)),
TypeMapping([float], "float", lambda val: str(val)),
]
for mapping in mappings:
for py_type in mapping.py_types:
if isinstance(value, py_type):
return mapping.type_string, mapping.value_transformer(value)
return None, None
def write_defaults_item(domain, key, value):
type, string_value = map_type(value)
if type is None:
print "Skipping unsupported pair: " + key + str(value)
return False
subprocess.call(["defaults", "write", domain, key, "-" + type, string_value])
return True
def write_defaults(domain, patch_file):
success_count = 0
for key, value in json.load(patch_file).iteritems():
if write_defaults_item(domain, key, value):
success_count += 1
print "Wrote " + str(success_count) + " defaults for " + domain
def setup_defaults(data_directory):
for file_name in os.listdir(data_directory):
# Skip hidden files
if file_name.startswith("."):
continue
# File name is expected to be the user defaults domain, with an optional .json extension.
domain = re.sub("\.json$", "", file_name)
with open(os.path.join(data_directory, file_name), "r") as opened_file:
write_defaults(domain, opened_file)
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Too few arguments. Usage: %s <data directory>' % sys.argv[0])
setup_defaults(sys.argv[1])
|
|
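The script expects one JSON dictionary per defaults domain, named after the domain (with an optional .json extension). A hypothetical data file and run -- file name, keys, and values are illustrative only:
# data/com.apple.dock.json
{
    "autohide": true,
    "tilesize": 36
}
# $ python setupdefaults.py data
# ...which shells out to the macOS defaults tool, e.g.:
#   defaults write com.apple.dock autohide -bool TRUE
#   defaults write com.apple.dock tilesize -int 36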
99ee6ecfa4bafbfd66a3cb2c2315a8daeeff3023
|
Lib/extractor/formats/ufo.py
|
Lib/extractor/formats/ufo.py
|
import os
from robofab.ufoLib import UFOReader
# ----------------
# Public Functions
# ----------------
def isUFO(pathOrFile):
if not isinstance(pathOrFile, basestring):
return False
if os.path.splitext(pathOrFile)[-1].lower() != ".ufo":
return False
if not os.path.isdir(pathOrFile):
return False
try:
reader = UFOReader(pathOrFile)
except:
return False
return True
def extractFontFromUFO(pathOrFile, destination, doGlyphs=True, doInfo=True, doKerning=True, doGroups=True, doFeatures=True, doLib=True, customFunctions=[]):
source = UFOReader(pathOrFile)
if doInfo:
source.readInfo(destination.info)
if doKerning:
kerning = source.readKerning()
destination.kerning.update(kerning)
if doGroups:
groups = source.readGroups()
destination.groups.update(groups)
if doFeatures:
features = source.readFeatures()
destination.features.text = features
if doLib:
lib = source.readLib()
destination.lib.update(lib)
if doGlyphs:
glyphSet = source.getGlyphSet()
for glyphName in glyphSet.keys():
destination.newGlyph(glyphName)
glyph = destination[glyphName]
pointPen = glyph.getPointPen()
glyphSet.readGlyph(glyphName=glyphName, glyphObject=glyph, pointPen=pointPen)
for function in customFunctions:
function(source, destination)
|
Add support for extracting a UFO from a UFO. It sounds silly, but it is useful.
|
Add support for extracting a UFO from a UFO. It sounds silly, but it is useful.
|
Python
|
mit
|
typesupply/extractor,typemytype/extractor,anthrotype/extractor
|
Add support for extracting a UFO from a UFO. It sounds silly, but it is useful.
|
import os
from robofab.ufoLib import UFOReader
# ----------------
# Public Functions
# ----------------
def isUFO(pathOrFile):
if not isinstance(pathOrFile, basestring):
return False
if os.path.splitext(pathOrFile)[-1].lower() != ".ufo":
return False
if not os.path.isdir(pathOrFile):
return False
try:
reader = UFOReader(pathOrFile)
except:
return False
return True
def extractFontFromUFO(pathOrFile, destination, doGlyphs=True, doInfo=True, doKerning=True, doGroups=True, doFeatures=True, doLib=True, customFunctions=[]):
source = UFOReader(pathOrFile)
if doInfo:
source.readInfo(destination.info)
if doKerning:
kerning = source.readKerning()
destination.kerning.update(kerning)
if doGroups:
groups = source.readGroups()
destination.groups.update(groups)
if doFeatures:
features = source.readFeatures()
destination.features.text = features
if doLib:
lib = source.readLib()
destination.lib.update(lib)
if doGlyphs:
glyphSet = source.getGlyphSet()
for glyphName in glyphSet.keys():
destination.newGlyph(glyphName)
glyph = destination[glyphName]
pointPen = glyph.getPointPen()
glyphSet.readGlyph(glyphName=glyphName, glyphObject=glyph, pointPen=pointPen)
for function in customFunctions:
function(source, destination)
|
<commit_before><commit_msg>Add support for extracting a UFO from a UFO. It sounds silly, but it is useful.<commit_after>
|
import os
from robofab.ufoLib import UFOReader
# ----------------
# Public Functions
# ----------------
def isUFO(pathOrFile):
if not isinstance(pathOrFile, basestring):
return False
if os.path.splitext(pathOrFile)[-1].lower() != ".ufo":
return False
if not os.path.isdir(pathOrFile):
return False
try:
reader = UFOReader(pathOrFile)
except:
return False
return True
def extractFontFromUFO(pathOrFile, destination, doGlyphs=True, doInfo=True, doKerning=True, doGroups=True, doFeatures=True, doLib=True, customFunctions=[]):
source = UFOReader(pathOrFile)
if doInfo:
source.readInfo(destination.info)
if doKerning:
kerning = source.readKerning()
destination.kerning.update(kerning)
if doGroups:
groups = source.readGroups()
destination.groups.update(groups)
if doFeatures:
features = source.readFeatures()
destination.features.text = features
if doLib:
lib = source.readLib()
destination.lib.update(lib)
if doGlyphs:
glyphSet = source.getGlyphSet()
for glyphName in glyphSet.keys():
destination.newGlyph(glyphName)
glyph = destination[glyphName]
pointPen = glyph.getPointPen()
glyphSet.readGlyph(glyphName=glyphName, glyphObject=glyph, pointPen=pointPen)
for function in customFunctions:
function(source, destination)
|
Add support for extracting a UFO from a UFO. It sounds silly, but it is useful.import os
from robofab.ufoLib import UFOReader
# ----------------
# Public Functions
# ----------------
def isUFO(pathOrFile):
if not isinstance(pathOrFile, basestring):
return False
if os.path.splitext(pathOrFile)[-1].lower() != ".ufo":
return False
if not os.path.isdir(pathOrFile):
return False
try:
reader = UFOReader(pathOrFile)
except:
return False
return True
def extractFontFromUFO(pathOrFile, destination, doGlyphs=True, doInfo=True, doKerning=True, doGroups=True, doFeatures=True, doLib=True, customFunctions=[]):
source = UFOReader(pathOrFile)
if doInfo:
source.readInfo(destination.info)
if doKerning:
kerning = source.readKerning()
destination.kerning.update(kerning)
if doGroups:
groups = source.readGroups()
destination.groups.update(groups)
if doFeatures:
features = source.readFeatures()
destination.features.text = features
if doLib:
lib = source.readLib()
destination.lib.update(lib)
if doGlyphs:
glyphSet = source.getGlyphSet()
for glyphName in glyphSet.keys():
destination.newGlyph(glyphName)
glyph = destination[glyphName]
pointPen = glyph.getPointPen()
glyphSet.readGlyph(glyphName=glyphName, glyphObject=glyph, pointPen=pointPen)
for function in customFunctions:
function(source, destination)
|
<commit_before><commit_msg>Add support for extracting a UFO from a UFO. It sounds silly, but it is useful.<commit_after>import os
from robofab.ufoLib import UFOReader
# ----------------
# Public Functions
# ----------------
def isUFO(pathOrFile):
if not isinstance(pathOrFile, basestring):
return False
if os.path.splitext(pathOrFile)[-1].lower() != ".ufo":
return False
if not os.path.isdir(pathOrFile):
return False
try:
reader = UFOReader(pathOrFile)
except:
return False
return True
def extractFontFromUFO(pathOrFile, destination, doGlyphs=True, doInfo=True, doKerning=True, doGroups=True, doFeatures=True, doLib=True, customFunctions=[]):
source = UFOReader(pathOrFile)
if doInfo:
source.readInfo(destination.info)
if doKerning:
kerning = source.readKerning()
destination.kerning.update(kerning)
if doGroups:
groups = source.readGroups()
destination.groups.update(groups)
if doFeatures:
features = source.readFeatures()
destination.features.text = features
if doLib:
lib = source.readLib()
destination.lib.update(lib)
if doGlyphs:
glyphSet = source.getGlyphSet()
for glyphName in glyphSet.keys():
destination.newGlyph(glyphName)
glyph = destination[glyphName]
pointPen = glyph.getPointPen()
glyphSet.readGlyph(glyphName=glyphName, glyphObject=glyph, pointPen=pointPen)
for function in customFunctions:
function(source, destination)
|
|
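A minimal driver for extractFontFromUFO, assuming a defcon-style Font as the destination (any object exposing .info, .kerning, .groups, .features, .lib, and newGlyph should work; both paths are placeholders):
from defcon import Font
from extractor.formats.ufo import isUFO, extractFontFromUFO
source_path = "Input.ufo"        # placeholder
if isUFO(source_path):
    font = Font()                # empty destination object
    extractFontFromUFO(source_path, font)
    font.save("Output.ufo")      # placeholder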
5a6759b131e4a61f9e7e4aaebb190a7e04b28b00
|
create-colormaps.py
|
create-colormaps.py
|
"""
Export colormaps from Python / matplotlib to JavaScript.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import json
from matplotlib.colors import Colormap
import matplotlib.cm as cm
import matplotlib.colors as colors
import numpy as np
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Loop over all matplotlib colormaps and store them in a dictionary. This
# dictionary contains the colors of the colormap as list of lists (= RGB
# tuples), and whether or not the colormap should be interpolated (= false
# for qualitative colormaps).
colormaps = {}
for name in dir(cm):
# Skip reversed colormaps (they don't contain additional information)
if name.endswith("_r"):
continue
# If `cmap` is a colormap, we can store the association information
if isinstance(cmap := getattr(cm, name), Colormap):
# Evaluate colormap on the grid to get colors, drop alpha channel
# information, and round to a reasonable precision
colors = np.around(cmap(np.linspace(0, 1, cmap.N))[:, 0:3], 4)
# Store relevant colormap information
colormaps[cmap.name] = {
"interpolate": cmap.N >= 256,
"colors": colors.tolist(),
}
# Save colormap data and shortcuts to data.js. The contents of this file
# need to be copied manually to js-colormaps.js.
with open("data.js", "w") as json_file:
# Write the data dictionary to data.js
json_file.write(f"const data = {json.dumps(colormaps)};")
json_file.write('\n\n')
# Write partial function applications to data.js so that we can use
# a colormap by its name --- e.g., call viridis(0.5) to evaluate the
# viridis colormap at a value of 0.5.
for name in colormaps.keys():
json_file.write(f"const {name} = partial('{name}');\n")
json_file.write(f"const {name}_r = partial('{name}_r');\n")
# Final words
print("\nExported data to data.js, please copy to js-colormaps.js!\n")
|
Update script to export colormaps
|
Update script to export colormaps
|
Python
|
mit
|
derherrg/js-colormaps,derherrg/js-colormaps
|
Update script to export colormaps
|
"""
Export colormaps from Python / matplotlib to JavaScript.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import json
from matplotlib.colors import Colormap
import matplotlib.cm as cm
import matplotlib.colors as colors
import numpy as np
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Loop over all matplotlib colormaps and store them in a dictionary. This
# dictionary contains the colors of the colormap as list of lists (= RGB
# tuples), and whether or not the colormap should be interpolated (= false
# for qualitative colormaps).
colormaps = {}
for name in dir(cm):
# Skip reversed colormaps (they don't contain additional information)
if name.endswith("_r"):
continue
# If `cmap` is a colormap, we can store the association information
if isinstance(cmap := getattr(cm, name), Colormap):
# Evaluate colormap on the grid to get colors, drop alpha channel
# information, and round to a reasonable precision
colors = np.around(cmap(np.linspace(0, 1, cmap.N))[:, 0:3], 4)
# Store relevant colormap information
colormaps[cmap.name] = {
"interpolate": cmap.N >= 256,
"colors": colors.tolist(),
}
# Save colormap data and shortcuts to data.js. The contents of this file
# need to be copied manually to js-colormaps.js.
with open("data.js", "w") as json_file:
# Write the data dictionary to data.js
json_file.write(f"const data = {json.dumps(colormaps)};")
json_file.write('\n\n')
# Write partial function applications to data.js so that we can use
# a colormap by its name --- e.g., call viridis(0.5) to evaluate the
# viridis colormap at a value of 0.5.
for name in colormaps.keys():
json_file.write(f"const {name} = partial('{name}');\n")
json_file.write(f"const {name}_r = partial('{name}_r');\n")
# Final words
print("\nExported data to data.js, please copy to js-colormaps.js!\n")
|
<commit_before><commit_msg>Update script to export colormaps<commit_after>
|
"""
Export colormaps from Python / matplotlib to JavaScript.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import json
from matplotlib.colors import Colormap
import matplotlib.cm as cm
import matplotlib.colors as colors
import numpy as np
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Loop over all matplotlib colormaps and store them in a dictionary. This
# dictionary contains the colors of the colormap as list of lists (= RGB
# tuples), and whether or not the colormap should be interpolated (= false
# for qualitative colormaps).
colormaps = {}
for name in dir(cm):
# Skip reversed colormaps (they don't contain additional information)
if name.endswith("_r"):
continue
# If `cmap` is a colormap, we can store the association information
if isinstance(cmap := getattr(cm, name), Colormap):
# Evaluate colormap on the grid to get colors, drop alpha channel
# information, and round to a reasonable precision
colors = np.around(cmap(np.linspace(0, 1, cmap.N))[:, 0:3], 4)
# Store relevant colormap information
colormaps[cmap.name] = {
"interpolate": cmap.N >= 256,
"colors": colors.tolist(),
}
# Save colormap data and shortcuts to data.js. The contents of this file
# need to be copied manually to js-colormaps.js.
with open("data.js", "w") as json_file:
# Write the data dictionary to data.js
json_file.write(f"const data = {json.dumps(colormaps)};")
json_file.write('\n\n')
# Write partial function applications to data.js so that we can use
# a colormap by its name --- e.g., call viridis(0.5) to evaluate the
# viridis colormap at a value of 0.5.
for name in colormaps.keys():
json_file.write(f"const {name} = partial('{name}');\n")
json_file.write(f"const {name}_r = partial('{name}_r');\n")
# Final words
print("\nExported data to data.js, please copy to js-colormaps.js!\n")
|
Update script to export colormaps"""
Export colormaps from Python / matplotlib to JavaScript.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import json
from matplotlib.colors import Colormap
import matplotlib.cm as cm
import matplotlib.colors as colors
import numpy as np
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Loop over all matplotlib colormaps and store them in a dictionary. This
# dictionary contains the colors of the colormap as list of lists (= RGB
# tuples), and whether or not the colormap should be interpolated (= false
# for qualitative colormaps).
colormaps = {}
for name in dir(cm):
# Skip reversed colormaps (they don't contain additional information)
if name.endswith("_r"):
continue
# If `cmap` is a colormap, we can store the association information
if isinstance(cmap := getattr(cm, name), Colormap):
# Evaluate colormap on the grid to get colors, drop alpha channel
# information, and round to a reasonable precision
colors = np.around(cmap(np.linspace(0, 1, cmap.N))[:, 0:3], 4)
# Store relevant colormap information
colormaps[cmap.name] = {
"interpolate": cmap.N >= 256,
"colors": colors.tolist(),
}
# Save colormap data and shortcuts to data.js. The contents of this file
# need to be copied manually to js-colormaps.js.
with open("data.js", "w") as json_file:
# Write the data dictionary to data.js
json_file.write(f"const data = {json.dumps(colormaps)};")
json_file.write('\n\n')
# Write partial function applications to data.js so that we can use
# a colormap by its name --- e.g., call viridis(0.5) to evaluate the
# viridis colormap at a value of 0.5.
for name in colormaps.keys():
json_file.write(f"const {name} = partial('{name}');\n")
json_file.write(f"const {name}_r = partial('{name}_r');\n")
# Final words
print("\nExported data to data.js, please copy to js-colormaps.js!\n")
|
<commit_before><commit_msg>Update script to export colormaps<commit_after>"""
Export colormaps from Python / matplotlib to JavaScript.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import json
from matplotlib.colors import Colormap
import matplotlib.cm as cm
import matplotlib.colors as colors
import numpy as np
# -----------------------------------------------------------------------------
# MAIN CODE
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Loop over all matplotlib colormaps and store them in a dictionary. This
# dictionary contains the colors of the colormap as list of lists (= RGB
# tuples), and whether or not the colormap should be interpolated (= false
# for qualitative colormaps).
colormaps = {}
for name in dir(cm):
# Skip reversed colormaps (they don't contain additional information)
if name.endswith("_r"):
continue
# If `cmap` is a colormap, we can store the association information
if isinstance(cmap := getattr(cm, name), Colormap):
# Evaluate colormap on the grid to get colors, drop alpha channel
# information, and round to a reasonable precision
colors = np.around(cmap(np.linspace(0, 1, cmap.N))[:, 0:3], 4)
# Store relevant colormap information
colormaps[cmap.name] = {
"interpolate": cmap.N >= 256,
"colors": colors.tolist(),
}
# Save colormap data and shortcuts to data.js. The contents of this file
# need to be copied manually to js-colormaps.js.
with open("data.js", "w") as json_file:
# Write the data dictionary to data.js
json_file.write(f"const data = {json.dumps(colormaps)};")
json_file.write('\n\n')
# Write partial function applications to data.js so that we can use
# a colormap by its name --- e.g., call viridis(0.5) to evaluate the
# viridis colormap at a value of 0.5.
for name in colormaps.keys():
json_file.write(f"const {name} = partial('{name}');\n")
json_file.write(f"const {name}_r = partial('{name}_r');\n")
# Final words
print("\nExported data to data.js, please copy to js-colormaps.js!\n")
|
|
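The per-colormap transformation is just an evaluation on [0, 1] with the alpha channel dropped; a standalone illustration for one map (viridis chosen arbitrarily), using the same getattr lookup as the script:
import numpy as np
import matplotlib.cm as cm
cmap = getattr(cm, "viridis")
rgba = cmap(np.linspace(0, 1, cmap.N))   # shape (N, 4): one RGBA row per grid point
rgb = np.around(rgba[:, 0:3], 4)         # drop alpha, round as the script does
print(rgb.shape, rgb[0], rgb[-1])        # (256, 3) plus the two endpoint colors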
974651717968885fd766f17ce942e77a15011253
|
go/apps/dialogue/tests/test_dialogue_api.py
|
go/apps/dialogue/tests/test_dialogue_api.py
|
"""Tests for go.apps.dialogue.dialogue_api."""
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from go.apps.dialogue.dialogue_api import DialogueActionDispatcher
from go.vumitools.api import VumiApi
from go.vumitools.tests.utils import GoAppWorkerTestMixin
class DialogueActionDispatcherTestCase(TestCase, GoAppWorkerTestMixin):
use_riak = True
@inlineCallbacks
def setUp(self):
self._persist_setUp()
self.config = self.mk_config({})
self.vumi_api = yield VumiApi.from_config_async(self.config)
self.account = yield self.mk_user(self.vumi_api, u'user')
self.user_api = self.vumi_api.get_user_api(self.account.key)
self.dispatcher = DialogueActionDispatcher(self.user_api)
def create_dialogue(self, poll):
config = {
"poll": poll,
}
return self.create_conversation(
conversation_type=u'dialogue', config=config)
@inlineCallbacks
def test_get_poll(self):
conv = yield self.create_dialogue(poll={"foo": "bar"})
result = yield self.dispatcher.dispatch_action(conv, "get_poll", {})
self.assertEqual(result, {"poll": {"foo": "bar"}})
@inlineCallbacks
def test_save_poll(self):
conv = yield self.create_dialogue(poll={})
result = yield self.dispatcher.dispatch_action(
conv, "save_poll", {"poll": {"foo": "bar"}})
self.assertEqual(result, {"saved": True})
conv = yield self.user_api.get_conversation(conv.key)
self.assertEqual(conv.config, {
"poll": {"foo": "bar"},
})
|
Add tests for get_poll and save_poll.
|
Add tests for get_poll and save_poll.
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
Add tests for get_poll and save_poll.
|
"""Tests for go.apps.dialogue.dialogue_api."""
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from go.apps.dialogue.dialogue_api import DialogueActionDispatcher
from go.vumitools.api import VumiApi
from go.vumitools.tests.utils import GoAppWorkerTestMixin
class DialogueActionDispatcherTestCase(TestCase, GoAppWorkerTestMixin):
use_riak = True
@inlineCallbacks
def setUp(self):
self._persist_setUp()
self.config = self.mk_config({})
self.vumi_api = yield VumiApi.from_config_async(self.config)
self.account = yield self.mk_user(self.vumi_api, u'user')
self.user_api = self.vumi_api.get_user_api(self.account.key)
self.dispatcher = DialogueActionDispatcher(self.user_api)
def create_dialogue(self, poll):
config = {
"poll": poll,
}
return self.create_conversation(
conversation_type=u'dialogue', config=config)
@inlineCallbacks
def test_get_poll(self):
conv = yield self.create_dialogue(poll={"foo": "bar"})
result = yield self.dispatcher.dispatch_action(conv, "get_poll", {})
self.assertEqual(result, {"poll": {"foo": "bar"}})
@inlineCallbacks
def test_save_poll(self):
conv = yield self.create_dialogue(poll={})
result = yield self.dispatcher.dispatch_action(
conv, "save_poll", {"poll": {"foo": "bar"}})
self.assertEqual(result, {"saved": True})
conv = yield self.user_api.get_conversation(conv.key)
self.assertEqual(conv.config, {
"poll": {"foo": "bar"},
})
|
<commit_before><commit_msg>Add tests for get_poll and save_poll.<commit_after>
|
"""Tests for go.apps.dialogue.dialogue_api."""
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from go.apps.dialogue.dialogue_api import DialogueActionDispatcher
from go.vumitools.api import VumiApi
from go.vumitools.tests.utils import GoAppWorkerTestMixin
class DialogueActionDispatcherTestCase(TestCase, GoAppWorkerTestMixin):
use_riak = True
@inlineCallbacks
def setUp(self):
self._persist_setUp()
self.config = self.mk_config({})
self.vumi_api = yield VumiApi.from_config_async(self.config)
self.account = yield self.mk_user(self.vumi_api, u'user')
self.user_api = self.vumi_api.get_user_api(self.account.key)
self.dispatcher = DialogueActionDispatcher(self.user_api)
def create_dialogue(self, poll):
config = {
"poll": poll,
}
return self.create_conversation(
conversation_type=u'dialogue', config=config)
@inlineCallbacks
def test_get_poll(self):
conv = yield self.create_dialogue(poll={"foo": "bar"})
result = yield self.dispatcher.dispatch_action(conv, "get_poll", {})
self.assertEqual(result, {"poll": {"foo": "bar"}})
@inlineCallbacks
def test_save_poll(self):
conv = yield self.create_dialogue(poll={})
result = yield self.dispatcher.dispatch_action(
conv, "save_poll", {"poll": {"foo": "bar"}})
self.assertEqual(result, {"saved": True})
conv = yield self.user_api.get_conversation(conv.key)
self.assertEqual(conv.config, {
"poll": {"foo": "bar"},
})
|
Add tests for get_poll and save_poll."""Tests for go.apps.dialogue.dialogue_api."""
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from go.apps.dialogue.dialogue_api import DialogueActionDispatcher
from go.vumitools.api import VumiApi
from go.vumitools.tests.utils import GoAppWorkerTestMixin
class DialogueActionDispatcherTestCase(TestCase, GoAppWorkerTestMixin):
use_riak = True
@inlineCallbacks
def setUp(self):
self._persist_setUp()
self.config = self.mk_config({})
self.vumi_api = yield VumiApi.from_config_async(self.config)
self.account = yield self.mk_user(self.vumi_api, u'user')
self.user_api = self.vumi_api.get_user_api(self.account.key)
self.dispatcher = DialogueActionDispatcher(self.user_api)
def create_dialogue(self, poll):
config = {
"poll": poll,
}
return self.create_conversation(
conversation_type=u'dialogue', config=config)
@inlineCallbacks
def test_get_poll(self):
conv = yield self.create_dialogue(poll={"foo": "bar"})
result = yield self.dispatcher.dispatch_action(conv, "get_poll", {})
self.assertEqual(result, {"poll": {"foo": "bar"}})
@inlineCallbacks
def test_save_poll(self):
conv = yield self.create_dialogue(poll={})
result = yield self.dispatcher.dispatch_action(
conv, "save_poll", {"poll": {"foo": "bar"}})
self.assertEqual(result, {"saved": True})
conv = yield self.user_api.get_conversation(conv.key)
self.assertEqual(conv.config, {
"poll": {"foo": "bar"},
})
|
<commit_before><commit_msg>Add tests for get_poll and save_poll.<commit_after>"""Tests for go.apps.dialogue.dialogue_api."""
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import TestCase
from go.apps.dialogue.dialogue_api import DialogueActionDispatcher
from go.vumitools.api import VumiApi
from go.vumitools.tests.utils import GoAppWorkerTestMixin
class DialogueActionDispatcherTestCase(TestCase, GoAppWorkerTestMixin):
use_riak = True
@inlineCallbacks
def setUp(self):
self._persist_setUp()
self.config = self.mk_config({})
self.vumi_api = yield VumiApi.from_config_async(self.config)
self.account = yield self.mk_user(self.vumi_api, u'user')
self.user_api = self.vumi_api.get_user_api(self.account.key)
self.dispatcher = DialogueActionDispatcher(self.user_api)
def create_dialogue(self, poll):
config = {
"poll": poll,
}
return self.create_conversation(
conversation_type=u'dialogue', config=config)
@inlineCallbacks
def test_get_poll(self):
conv = yield self.create_dialogue(poll={"foo": "bar"})
result = yield self.dispatcher.dispatch_action(conv, "get_poll", {})
self.assertEqual(result, {"poll": {"foo": "bar"}})
@inlineCallbacks
def test_save_poll(self):
conv = yield self.create_dialogue(poll={})
result = yield self.dispatcher.dispatch_action(
conv, "save_poll", {"poll": {"foo": "bar"}})
self.assertEqual(result, {"saved": True})
conv = yield self.user_api.get_conversation(conv.key)
self.assertEqual(conv.config, {
"poll": {"foo": "bar"},
})
|
|
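The tests pin down the dispatcher contract: get_poll echoes the stored config, save_poll persists it and acknowledges. A generic sketch of the pattern they imply -- an illustration of the shape only, not the vumi-go implementation, and the conv.save() persistence hook is an assumption:
class PollActionDispatcher(object):
    # Route an action name to a matching action_<name> method.
    def __init__(self, user_api):
        self.user_api = user_api
    def dispatch_action(self, conv, action, params):
        handler = getattr(self, "action_%s" % action)
        return handler(conv, **params)
    def action_get_poll(self, conv):
        return {"poll": conv.config.get("poll", {})}
    def action_save_poll(self, conv, poll):
        conv.config["poll"] = poll
        conv.save()  # assumed persistence hook
        return {"saved": True}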
e274c17ac8bc61d5e96fcab579b8ad07c3a48403
|
queueing/resources.py
|
queueing/resources.py
|
"""
queueing.resources
==================
Utilities and resources for queueing.
:author: Michael Browning
:copyright: (c) 2013 by Michael Browning.
:license: BSD, see LICENSE for more details.
"""
import threading
class StoppableThread(threading.Thread):
"""A thread that exposes a stop command to halt execution."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
"""Send the thread a stop request."""
self._stop.set()
def stopped(self):
"""Check if thread is stopped."""
        return self._stop.isSet()
|
Allow stopping of all queueing threads and respond to keyboard interrupt in main
|
Allow stopping of all queueing threads and respond to keyboard interrupt in main
|
Python
|
bsd-3-clause
|
mrbrowning/queueing
|
Allow stopping of all queueing threads and respond to keyboard interrupt in main
|
"""
queueing.resources
==================
Utilities and resources for queueing.
:author: Michael Browning
:copyright: (c) 2013 by Michael Browning.
:license: BSD, see LICENSE for more details.
"""
import threading
class StoppableThread(threading.Thread):
"""A thread that exposes a stop command to halt execution."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
"""Send the thread a stop request."""
self._stop.set()
def stopped(self):
"""Check if thread is stopped."""
        return self._stop.isSet()
|
<commit_before><commit_msg>Allow stopping of all queueing threads and respond to keyboard interrupt in main<commit_after>
|
"""
queueing.resources
==================
Utilities and resources for queueing.
:author: Michael Browning
:copyright: (c) 2013 by Michael Browning.
:license: BSD, see LICENSE for more details.
"""
import threading
class StoppableThread(threading.Thread):
"""A thread that exposes a stop command to halt execution."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
"""Send the thread a stop request."""
self._stop.set()
def stopped(self):
"""Check if thread is stopped."""
        return self._stop.isSet()
|
Allow stopping of all queueing threads and respond to keyboard interrupt in main"""
queueing.resources
==================
Utilities and resources for queueing.
:author: Michael Browning
:copyright: (c) 2013 by Michael Browning.
:license: BSD, see LICENSE for more details.
"""
import threading
class StoppableThread(threading.Thread):
"""A thread that exposes a stop command to halt execution."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
"""Send the thread a stop request."""
self._stop.set()
def stopped(self):
"""Check if thread is stopped."""
        return self._stop.isSet()
|
<commit_before><commit_msg>Allow stopping of all queueing threads and respond to keyboard interrupt in main<commit_after>"""
queueing.resources
==================
Utilities and resources for queueing.
:author: Michael Browning
:copyright: (c) 2013 by Michael Browning.
:license: BSD, see LICENSE for more details.
"""
import threading
class StoppableThread(threading.Thread):
"""A thread that exposes a stop command to halt execution."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
"""Send the thread a stop request."""
self._stop.set()
def stopped(self):
"""Check if thread is stopped."""
        return self._stop.isSet()
|
|
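The commit message mentions stopping all queueing threads and responding to a keyboard interrupt in main; the usual shape with the class above looks roughly like this (the worker body and thread count are illustrative):
import time
from queueing.resources import StoppableThread
class Worker(StoppableThread):
    def run(self):
        while not self.stopped():    # relies on stopped() returning the flag
            time.sleep(0.1)          # placeholder for real queue work
if __name__ == "__main__":
    workers = [Worker() for _ in range(4)]
    for w in workers:
        w.start()
    try:
        while any(w.is_alive() for w in workers):
            time.sleep(0.5)
    except KeyboardInterrupt:        # Ctrl-C: ask every worker to wind down
        for w in workers:
            w.stop()
        for w in workers:
            w.join()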
475a573731de50695a12b8376d6f01ab155f5893
|
api/sonetworks/migrations/0029_post_published.py
|
api/sonetworks/migrations/0029_post_published.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 23:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0028_auto_20170509_0231'),
]
operations = [
migrations.AddField(
model_name='post',
name='published',
field=models.BooleanField(default=0),
),
]
|
UPDATE Post model, published boolean field added
|
UPDATE Post model, published boolean field added
|
Python
|
mit
|
semitki/semitki,semitki/semitki,semitki/semitki,semitki/semitki
|
UPDATE Post model, published boolean field added
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 23:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0028_auto_20170509_0231'),
]
operations = [
migrations.AddField(
model_name='post',
name='published',
field=models.BooleanField(default=0),
),
]
|
<commit_before><commit_msg>UPDATE Post model, published boolean field added<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 23:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0028_auto_20170509_0231'),
]
operations = [
migrations.AddField(
model_name='post',
name='published',
field=models.BooleanField(default=0),
),
]
|
UPDATE Post model, published boolean field added# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 23:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0028_auto_20170509_0231'),
]
operations = [
migrations.AddField(
model_name='post',
name='published',
field=models.BooleanField(default=0),
),
]
|
<commit_before><commit_msg>UPDATE Post model, published boolean field added<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-10 23:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0028_auto_20170509_0231'),
]
operations = [
migrations.AddField(
model_name='post',
name='published',
field=models.BooleanField(default=0),
),
]
|
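One nit on the generated field: default=0 works only because 0 is falsy; the idiomatic spelling is an actual boolean, e.g.:
field = models.BooleanField(default=False)
# Call sites then read naturally, e.g. Post.objects.filter(published=True)
# (assuming the default model manager).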