| column | dtype summary |
|---|---|
| commit | stringlengths 40-40 |
| old_file | stringlengths 4-118 |
| new_file | stringlengths 4-118 |
| old_contents | stringlengths 0-2.94k |
| new_contents | stringlengths 1-4.43k |
| subject | stringlengths 15-444 |
| message | stringlengths 16-3.45k |
| lang | stringclasses (1 value) |
| license | stringclasses (13 values) |
| repos | stringlengths 5-43.2k |
| prompt | stringlengths 17-4.58k |
| response | stringlengths 1-4.43k |
| prompt_tagged | stringlengths 58-4.62k |
| response_tagged | stringlengths 1-4.43k |
| text | stringlengths 132-7.29k |
| text_tagged | stringlengths 173-7.33k |
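A minimal sketch of how a dataset with this schema could be loaded and inspected, assuming it is published on the Hugging Face Hub; the repository id used below is a placeholder, not the dataset's real name.

```python
# Minimal sketch: load a dataset with the columns summarised above and look at one row.
# "user/python-commit-dataset" is a placeholder repository id (an assumption, not the real name).
from datasets import load_dataset

ds = load_dataset("user/python-commit-dataset", split="train")

print(ds.column_names)  # e.g. ['commit', 'old_file', 'new_file', 'old_contents', ...]
print(len(ds))          # number of rows in the split

row = ds[0]
print(row["commit"])    # 40-character commit hash
print(row["new_file"])  # path of the file touched by the commit
print(row["subject"])   # first line of the commit message
```

The sample rows below list each cell in the column order above, separated by `|` lines.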
fd4bf48267e5110c92f4a7c51833c556a99ffc87
|
tests/views/test_admin_organisations_page.py
|
tests/views/test_admin_organisations_page.py
|
from tests import PMGLiveServerTestCase
from mock import patch
import unittest
from pmg.models import db, User
from tests.fixtures import dbfixture, UserData, OrganisationData
class TestAdminOrganisationsPage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminOrganisationsPage, self).setUp()
self.fx = dbfixture.data(UserData, OrganisationData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super(TestAdminOrganisationsPage, self).tearDown()
def test_admin_organisations_page(self):
"""
Test admin organisations page (/admin/organisation/)
"""
self.make_request("/admin/organisation/", self.user, follow_redirects=True)
self.assertIn("Organisations", self.html)
self.assertIn(self.fx.OrganisationData.pmg.name, self.html)
self.assertIn(self.fx.OrganisationData.pmg.domain, self.html)
|
Add admin list organisations test
|
Add admin list organisations test
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add admin list organisations test
|
from tests import PMGLiveServerTestCase
from mock import patch
import unittest
from pmg.models import db, User
from tests.fixtures import dbfixture, UserData, OrganisationData
class TestAdminOrganisationsPage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminOrganisationsPage, self).setUp()
self.fx = dbfixture.data(UserData, OrganisationData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super(TestAdminOrganisationsPage, self).tearDown()
def test_admin_organisations_page(self):
"""
Test admin organisations page (/admin/organisation/)
"""
self.make_request("/admin/organisation/", self.user, follow_redirects=True)
self.assertIn("Organisations", self.html)
self.assertIn(self.fx.OrganisationData.pmg.name, self.html)
self.assertIn(self.fx.OrganisationData.pmg.domain, self.html)
|
<commit_before><commit_msg>Add admin list organisations test<commit_after>
|
from tests import PMGLiveServerTestCase
from mock import patch
import unittest
from pmg.models import db, User
from tests.fixtures import dbfixture, UserData, OrganisationData
class TestAdminOrganisationsPage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminOrganisationsPage, self).setUp()
self.fx = dbfixture.data(UserData, OrganisationData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super(TestAdminOrganisationsPage, self).tearDown()
def test_admin_organisations_page(self):
"""
Test admin organisations page (/admin/organisation/)
"""
self.make_request("/admin/organisation/", self.user, follow_redirects=True)
self.assertIn("Organisations", self.html)
self.assertIn(self.fx.OrganisationData.pmg.name, self.html)
self.assertIn(self.fx.OrganisationData.pmg.domain, self.html)
|
Add admin list organisations testfrom tests import PMGLiveServerTestCase
from mock import patch
import unittest
from pmg.models import db, User
from tests.fixtures import dbfixture, UserData, OrganisationData
class TestAdminOrganisationsPage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminOrganisationsPage, self).setUp()
self.fx = dbfixture.data(UserData, OrganisationData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super(TestAdminOrganisationsPage, self).tearDown()
def test_admin_organisations_page(self):
"""
Test admin organisations page (/admin/organisation/)
"""
self.make_request("/admin/organisation/", self.user, follow_redirects=True)
self.assertIn("Organisations", self.html)
self.assertIn(self.fx.OrganisationData.pmg.name, self.html)
self.assertIn(self.fx.OrganisationData.pmg.domain, self.html)
|
<commit_before><commit_msg>Add admin list organisations test<commit_after>from tests import PMGLiveServerTestCase
from mock import patch
import unittest
from pmg.models import db, User
from tests.fixtures import dbfixture, UserData, OrganisationData
class TestAdminOrganisationsPage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminOrganisationsPage, self).setUp()
self.fx = dbfixture.data(UserData, OrganisationData)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.delete_created_objects()
self.fx.teardown()
super(TestAdminOrganisationsPage, self).tearDown()
def test_admin_organisations_page(self):
"""
Test admin organisations page (/admin/organisation/)
"""
self.make_request("/admin/organisation/", self.user, follow_redirects=True)
self.assertIn("Organisations", self.html)
self.assertIn(self.fx.OrganisationData.pmg.name, self.html)
self.assertIn(self.fx.OrganisationData.pmg.domain, self.html)
|
|
e3349e71047381a487bf7abe8c5563c4811a3668
|
tests/consoles_tests.py
|
tests/consoles_tests.py
|
import io
import spur
from nose.tools import istest, assert_equal
from toodlepip.consoles import Console
@istest
def console_writes_output_to_console():
console, output = _create_local_console()
console.run("Action", ["echo", "Go go go!"])
assert b"Go go go!" in output.getvalue()
def _create_local_console():
output = io.BytesIO()
shell = spur.LocalShell()
return Console(shell, output), output
|
Add basic test for console
|
Add basic test for console
|
Python
|
bsd-2-clause
|
mwilliamson/toodlepip
|
Add basic test for console
|
import io
import spur
from nose.tools import istest, assert_equal
from toodlepip.consoles import Console
@istest
def console_writes_output_to_console():
console, output = _create_local_console()
console.run("Action", ["echo", "Go go go!"])
assert b"Go go go!" in output.getvalue()
def _create_local_console():
output = io.BytesIO()
shell = spur.LocalShell()
return Console(shell, output), output
|
<commit_before><commit_msg>Add basic test for console<commit_after>
|
import io
import spur
from nose.tools import istest, assert_equal
from toodlepip.consoles import Console
@istest
def console_writes_output_to_console():
console, output = _create_local_console()
console.run("Action", ["echo", "Go go go!"])
assert b"Go go go!" in output.getvalue()
def _create_local_console():
output = io.BytesIO()
shell = spur.LocalShell()
return Console(shell, output), output
|
Add basic test for consoleimport io
import spur
from nose.tools import istest, assert_equal
from toodlepip.consoles import Console
@istest
def console_writes_output_to_console():
console, output = _create_local_console()
console.run("Action", ["echo", "Go go go!"])
assert b"Go go go!" in output.getvalue()
def _create_local_console():
output = io.BytesIO()
shell = spur.LocalShell()
return Console(shell, output), output
|
<commit_before><commit_msg>Add basic test for console<commit_after>import io
import spur
from nose.tools import istest, assert_equal
from toodlepip.consoles import Console
@istest
def console_writes_output_to_console():
console, output = _create_local_console()
console.run("Action", ["echo", "Go go go!"])
assert b"Go go go!" in output.getvalue()
def _create_local_console():
output = io.BytesIO()
shell = spur.LocalShell()
return Console(shell, output), output
|
|
f5b33f7e80176efeb0eb0d0ea6fc8a8c7463a429
|
corehq/motech/repeaters/management/commands/delete_duplicate_cancelled_records.py
|
corehq/motech/repeaters/management/commands/delete_duplicate_cancelled_records.py
|
import csv
import datetime
from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.models import RepeatRecord
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
If there are multiple cancelled repeat records for a given payload id, this
will delete all but one for each payload, reducing the number of requests
that must be made.
"""
def add_arguments(self, parser):
parser.add_argument(
'domain',
)
parser.add_argument(
'repeater_id',
)
def handle(self, domain, repeater_id, *args, **options):
records_by_payload_id = defaultdict(list)
records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
total_records = 0
for record in records:
records_by_payload_id[record.payload_id].append(record)
total_records += 1
unique_payloads = len(records_by_payload_id)
print ("There are {} total records and {} unique payload ids."
.format(total_records, unique_payloads))
print "Delete {} duplicate records?".format(total_records - unique_payloads)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = resolve_duplicates(records_by_payload_id)
filename = "cancelled_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
def resolve_duplicates(records_by_payload_id):
log = [('RepeatRecord ID', 'Payload ID', 'Deleted?')]
with IterDB(RepeatRecord.get_db()) as db:
for payload_id, records in records_by_payload_id.items():
log.append((records[0]._id, payload_id, records[0].failure_reason, 'No'))
if len(records) > 1:
for record in records[1:]:
db.delete(record)
log.append((record._id, payload_id, record.failure_reason, 'Yes'))
return log
|
Add management command to delete duplicate cancelled repeat records
|
Add management command to delete duplicate cancelled repeat records
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add management command to delete duplicate cancelled repeat records
|
import csv
import datetime
from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.models import RepeatRecord
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
If there are multiple cancelled repeat records for a given payload id, this
will delete all but one for each payload, reducing the number of requests
that must be made.
"""
def add_arguments(self, parser):
parser.add_argument(
'domain',
)
parser.add_argument(
'repeater_id',
)
def handle(self, domain, repeater_id, *args, **options):
records_by_payload_id = defaultdict(list)
records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
total_records = 0
for record in records:
records_by_payload_id[record.payload_id].append(record)
total_records += 1
unique_payloads = len(records_by_payload_id)
print ("There are {} total records and {} unique payload ids."
.format(total_records, unique_payloads))
print "Delete {} duplicate records?".format(total_records - unique_payloads)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = resolve_duplicates(records_by_payload_id)
filename = "cancelled_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
def resolve_duplicates(records_by_payload_id):
log = [('RepeatRecord ID', 'Payload ID', 'Deleted?')]
with IterDB(RepeatRecord.get_db()) as db:
for payload_id, records in records_by_payload_id.items():
log.append((records[0]._id, payload_id, records[0].failure_reason, 'No'))
if len(records) > 1:
for record in records[1:]:
db.delete(record)
log.append((record._id, payload_id, record.failure_reason, 'Yes'))
return log
|
<commit_before><commit_msg>Add management command to delete duplicate cancelled repeat records<commit_after>
|
import csv
import datetime
from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.models import RepeatRecord
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
If there are multiple cancelled repeat records for a given payload id, this
will delete all but one for each payload, reducing the number of requests
that must be made.
"""
def add_arguments(self, parser):
parser.add_argument(
'domain',
)
parser.add_argument(
'repeater_id',
)
def handle(self, domain, repeater_id, *args, **options):
records_by_payload_id = defaultdict(list)
records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
total_records = 0
for record in records:
records_by_payload_id[record.payload_id].append(record)
total_records += 1
unique_payloads = len(records_by_payload_id)
print ("There are {} total records and {} unique payload ids."
.format(total_records, unique_payloads))
print "Delete {} duplicate records?".format(total_records - unique_payloads)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = resolve_duplicates(records_by_payload_id)
filename = "cancelled_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
def resolve_duplicates(records_by_payload_id):
log = [('RepeatRecord ID', 'Payload ID', 'Deleted?')]
with IterDB(RepeatRecord.get_db()) as db:
for payload_id, records in records_by_payload_id.items():
log.append((records[0]._id, payload_id, records[0].failure_reason, 'No'))
if len(records) > 1:
for record in records[1:]:
db.delete(record)
log.append((record._id, payload_id, record.failure_reason, 'Yes'))
return log
|
Add management command to delete duplicate cancelled repeat recordsimport csv
import datetime
from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.models import RepeatRecord
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
If there are multiple cancelled repeat records for a given payload id, this
will delete all but one for each payload, reducing the number of requests
that must be made.
"""
def add_arguments(self, parser):
parser.add_argument(
'domain',
)
parser.add_argument(
'repeater_id',
)
def handle(self, domain, repeater_id, *args, **options):
records_by_payload_id = defaultdict(list)
records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
total_records = 0
for record in records:
records_by_payload_id[record.payload_id].append(record)
total_records += 1
unique_payloads = len(records_by_payload_id)
print ("There are {} total records and {} unique payload ids."
.format(total_records, unique_payloads))
print "Delete {} duplicate records?".format(total_records - unique_payloads)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = resolve_duplicates(records_by_payload_id)
filename = "cancelled_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
def resolve_duplicates(records_by_payload_id):
log = [('RepeatRecord ID', 'Payload ID', 'Deleted?')]
with IterDB(RepeatRecord.get_db()) as db:
for payload_id, records in records_by_payload_id.items():
log.append((records[0]._id, payload_id, records[0].failure_reason, 'No'))
if len(records) > 1:
for record in records[1:]:
db.delete(record)
log.append((record._id, payload_id, record.failure_reason, 'Yes'))
return log
|
<commit_before><commit_msg>Add management command to delete duplicate cancelled repeat records<commit_after>import csv
import datetime
from collections import defaultdict
from django.core.management.base import BaseCommand
from corehq.util.couch import IterDB
from corehq.motech.repeaters.const import RECORD_CANCELLED_STATE
from corehq.motech.repeaters.models import RepeatRecord
from corehq.motech.repeaters.dbaccessors import iter_repeat_records_by_domain
class Command(BaseCommand):
help = """
If there are multiple cancelled repeat records for a given payload id, this
will delete all but one for each payload, reducing the number of requests
that must be made.
"""
def add_arguments(self, parser):
parser.add_argument(
'domain',
)
parser.add_argument(
'repeater_id',
)
def handle(self, domain, repeater_id, *args, **options):
records_by_payload_id = defaultdict(list)
records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=RECORD_CANCELLED_STATE)
total_records = 0
for record in records:
records_by_payload_id[record.payload_id].append(record)
total_records += 1
unique_payloads = len(records_by_payload_id)
print ("There are {} total records and {} unique payload ids."
.format(total_records, unique_payloads))
print "Delete {} duplicate records?".format(total_records - unique_payloads)
if not raw_input("(y/n)") == 'y':
print "Aborting"
return
log = resolve_duplicates(records_by_payload_id)
filename = "cancelled_repeat_records-{}.csv".format(datetime.datetime.utcnow().isoformat())
print "Writing log of changes to {}".format(filename)
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerows(log)
def resolve_duplicates(records_by_payload_id):
log = [('RepeatRecord ID', 'Payload ID', 'Deleted?')]
with IterDB(RepeatRecord.get_db()) as db:
for payload_id, records in records_by_payload_id.items():
log.append((records[0]._id, payload_id, records[0].failure_reason, 'No'))
if len(records) > 1:
for record in records[1:]:
db.delete(record)
log.append((record._id, payload_id, record.failure_reason, 'Yes'))
return log
|
|
4840b5b6326229a215ccd3ae4d230adc2f5cf374
|
tools/rename-library.py
|
tools/rename-library.py
|
# -*- coding: utf-8 -*-
"""
This script is mainly used to simplify benchmarks between different
versions of the library: it allows to rename every mention of the
library name, which helps to include two versions side-by-side
without conflicts, and to compare them in the same benchmark.
"""
import argparse
import fileinput
import fnmatch
import os
import sys
import pygit2
# Modify this dict to change the words to replace
REPLACEMENT_LIST = {
'cpp-sort': 'cpp-sort-old',
'cppsort': 'cppsort_old',
'CPPSORT': 'CPPSORT_OLD'
}
def main():
parser = argparse.ArgumentParser(description="Rename all the parts of the library that matter")
parser.add_argument('root', type=str)
args = parser.parse_args()
repo_root = os.path.abspath(args.root)
repo = pygit2.Repository(repo_root)
os.chdir(repo_root)
##################################################
# Rename the library mentions in code
# Files we are interested into match one of these patterns
filename_patterns = [
"*.h",
"*.cpp",
"CMakeLists.txt"
]
for root, dirs, files in os.walk(repo_root):
dirs[:] = [dir_path for dir_path in dirs
if not repo.path_is_ignored(dir_path)]
for file in files:
# Filter files
if repo.path_is_ignored(file):
continue
if not any(fnmatch.fnmatch(file, pattern)
for pattern in filename_patterns):
continue
print("Modifying:", os.path.join(root, file))
# Replace references to cpp-sort
with fileinput.FileInput(os.path.join(root, file), inplace=True) as fd:
for line in fd:
for old_str, new_str in REPLACEMENT_LIST.items():
line = line.replace(old_str, new_str)
print(line, end='')
##################################################
# Rename the main include directory
# Directory is removed after the files have been modified to make
# sure that the .gitignore is valid when modifying said files
old_dirname = os.path.join(repo_root, 'include', 'cpp-sort')
new_dirname = os.path.join(repo_root, 'include', REPLACEMENT_LIST['cpp-sort'])
os.rename(old_dirname, new_dirname)
if __name__ == '__main__':
main()
|
Add script to rename all references to cpp-sort in the library.
|
Add script to rename all references to cpp-sort in the library.
This tool mostly exists to simplify including and comparing different
versions of the same algorithms in benchmarks.
[ci skip]
|
Python
|
mit
|
Morwenn/cpp-sort,Morwenn/cpp-sort,Morwenn/cpp-sort,Morwenn/cpp-sort
|
Add script to rename all references to cpp-sort in the library.
This tool mostly exists to simplify including and comparing different
versions of the same algorithms in benchmarks.
[ci skip]
|
# -*- coding: utf-8 -*-
"""
This script is mainly used to simplify benchmarks between different
versions of the library: it allows to rename every mention of the
library name, which helps to include two versions side-by-side
without conflicts, and to compare them in the same benchmark.
"""
import argparse
import fileinput
import fnmatch
import os
import sys
import pygit2
# Modify this dict to change the words to replace
REPLACEMENT_LIST = {
'cpp-sort': 'cpp-sort-old',
'cppsort': 'cppsort_old',
'CPPSORT': 'CPPSORT_OLD'
}
def main():
parser = argparse.ArgumentParser(description="Rename all the parts of the library that matter")
parser.add_argument('root', type=str)
args = parser.parse_args()
repo_root = os.path.abspath(args.root)
repo = pygit2.Repository(repo_root)
os.chdir(repo_root)
##################################################
# Rename the library mentions in code
# Files we are interested into match one of these patterns
filename_patterns = [
"*.h",
"*.cpp",
"CMakeLists.txt"
]
for root, dirs, files in os.walk(repo_root):
dirs[:] = [dir_path for dir_path in dirs
if not repo.path_is_ignored(dir_path)]
for file in files:
# Filter files
if repo.path_is_ignored(file):
continue
if not any(fnmatch.fnmatch(file, pattern)
for pattern in filename_patterns):
continue
print("Modifying:", os.path.join(root, file))
# Replace references to cpp-sort
with fileinput.FileInput(os.path.join(root, file), inplace=True) as fd:
for line in fd:
for old_str, new_str in REPLACEMENT_LIST.items():
line = line.replace(old_str, new_str)
print(line, end='')
##################################################
# Rename the main include directory
# Directory is removed after the files have been modified to make
# sure that the .gitignore is valid when modifying said files
old_dirname = os.path.join(repo_root, 'include', 'cpp-sort')
new_dirname = os.path.join(repo_root, 'include', REPLACEMENT_LIST['cpp-sort'])
os.rename(old_dirname, new_dirname)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to rename all references to cpp-sort in the library.
This tool mostly exists to simplify including and comparing different
versions of the same algorithms in benchmarks.
[ci skip]<commit_after>
|
# -*- coding: utf-8 -*-
"""
This script is mainly used to simplify benchmarks between different
versions of the library: it allows to rename every mention of the
library name, which helps to include two versions side-by-side
without conflicts, and to compare them in the same benchmark.
"""
import argparse
import fileinput
import fnmatch
import os
import sys
import pygit2
# Modify this dict to change the words to replace
REPLACEMENT_LIST = {
'cpp-sort': 'cpp-sort-old',
'cppsort': 'cppsort_old',
'CPPSORT': 'CPPSORT_OLD'
}
def main():
parser = argparse.ArgumentParser(description="Rename all the parts of the library that matter")
parser.add_argument('root', type=str)
args = parser.parse_args()
repo_root = os.path.abspath(args.root)
repo = pygit2.Repository(repo_root)
os.chdir(repo_root)
##################################################
# Rename the library mentions in code
# Files we are interested into match one of these patterns
filename_patterns = [
"*.h",
"*.cpp",
"CMakeLists.txt"
]
for root, dirs, files in os.walk(repo_root):
dirs[:] = [dir_path for dir_path in dirs
if not repo.path_is_ignored(dir_path)]
for file in files:
# Filter files
if repo.path_is_ignored(file):
continue
if not any(fnmatch.fnmatch(file, pattern)
for pattern in filename_patterns):
continue
print("Modifying:", os.path.join(root, file))
# Replace references to cpp-sort
with fileinput.FileInput(os.path.join(root, file), inplace=True) as fd:
for line in fd:
for old_str, new_str in REPLACEMENT_LIST.items():
line = line.replace(old_str, new_str)
print(line, end='')
##################################################
# Rename the main include directory
# Directory is removed after the files have been modified to make
# sure that the .gitignore is valid when modifying said files
old_dirname = os.path.join(repo_root, 'include', 'cpp-sort')
new_dirname = os.path.join(repo_root, 'include', REPLACEMENT_LIST['cpp-sort'])
os.rename(old_dirname, new_dirname)
if __name__ == '__main__':
main()
|
Add script to rename all references to cpp-sort in the library.
This tool mostly exists to simplify including and comparing different
versions of the same algorithms in benchmarks.
[ci skip]# -*- coding: utf-8 -*-
"""
This script is mainly used to simplify benchmarks between different
versions of the library: it allows to rename every mention of the
library name, which helps to include two versions side-by-side
without conflicts, and to compare them in the same benchmark.
"""
import argparse
import fileinput
import fnmatch
import os
import sys
import pygit2
# Modify this dict to change the words to replace
REPLACEMENT_LIST = {
'cpp-sort': 'cpp-sort-old',
'cppsort': 'cppsort_old',
'CPPSORT': 'CPPSORT_OLD'
}
def main():
parser = argparse.ArgumentParser(description="Rename all the parts of the library that matter")
parser.add_argument('root', type=str)
args = parser.parse_args()
repo_root = os.path.abspath(args.root)
repo = pygit2.Repository(repo_root)
os.chdir(repo_root)
##################################################
# Rename the library mentions in code
# Files we are interested into match one of these patterns
filename_patterns = [
"*.h",
"*.cpp",
"CMakeLists.txt"
]
for root, dirs, files in os.walk(repo_root):
dirs[:] = [dir_path for dir_path in dirs
if not repo.path_is_ignored(dir_path)]
for file in files:
# Filter files
if repo.path_is_ignored(file):
continue
if not any(fnmatch.fnmatch(file, pattern)
for pattern in filename_patterns):
continue
print("Modifying:", os.path.join(root, file))
# Replace references to cpp-sort
with fileinput.FileInput(os.path.join(root, file), inplace=True) as fd:
for line in fd:
for old_str, new_str in REPLACEMENT_LIST.items():
line = line.replace(old_str, new_str)
print(line, end='')
##################################################
# Rename the main include directory
# Directory is removed after the files have been modified to make
# sure that the .gitignore is valid when modifying said files
old_dirname = os.path.join(repo_root, 'include', 'cpp-sort')
new_dirname = os.path.join(repo_root, 'include', REPLACEMENT_LIST['cpp-sort'])
os.rename(old_dirname, new_dirname)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to rename all references to cpp-sort in the library.
This tool mostly exists to simplify including and comparing different
versions of the same algorithms in benchmarks.
[ci skip]<commit_after># -*- coding: utf-8 -*-
"""
This script is mainly used to simplify benchmarks between different
versions of the library: it allows to rename every mention of the
library name, which helps to include two versions side-by-side
without conflicts, and to compare them in the same benchmark.
"""
import argparse
import fileinput
import fnmatch
import os
import sys
import pygit2
# Modify this dict to change the words to replace
REPLACEMENT_LIST = {
'cpp-sort': 'cpp-sort-old',
'cppsort': 'cppsort_old',
'CPPSORT': 'CPPSORT_OLD'
}
def main():
parser = argparse.ArgumentParser(description="Rename all the parts of the library that matter")
parser.add_argument('root', type=str)
args = parser.parse_args()
repo_root = os.path.abspath(args.root)
repo = pygit2.Repository(repo_root)
os.chdir(repo_root)
##################################################
# Rename the library mentions in code
# Files we are interested into match one of these patterns
filename_patterns = [
"*.h",
"*.cpp",
"CMakeLists.txt"
]
for root, dirs, files in os.walk(repo_root):
dirs[:] = [dir_path for dir_path in dirs
if not repo.path_is_ignored(dir_path)]
for file in files:
# Filter files
if repo.path_is_ignored(file):
continue
if not any(fnmatch.fnmatch(file, pattern)
for pattern in filename_patterns):
continue
print("Modifying:", os.path.join(root, file))
# Replace references to cpp-sort
with fileinput.FileInput(os.path.join(root, file), inplace=True) as fd:
for line in fd:
for old_str, new_str in REPLACEMENT_LIST.items():
line = line.replace(old_str, new_str)
print(line, end='')
##################################################
# Rename the main include directory
# Directory is removed after the files have been modified to make
# sure that the .gitignore is valid when modifying said files
old_dirname = os.path.join(repo_root, 'include', 'cpp-sort')
new_dirname = os.path.join(repo_root, 'include', REPLACEMENT_LIST['cpp-sort'])
os.rename(old_dirname, new_dirname)
if __name__ == '__main__':
main()
|
|
7721cd26ca18b655e4866fff17d4522cccc02a7a
|
migrations/versions/0369_update_sms_rates.py
|
migrations/versions/0369_update_sms_rates.py
|
"""
Revision ID: 0369_update_sms_rates
Revises: 0368_move_orgs_to_nhs_branding
Create Date: 2022-04-26 09:39:45.260951
"""
import uuid
from alembic import op
revision = '0369_update_sms_rates'
down_revision = '0368_move_orgs_to_nhs_branding'
def upgrade():
op.execute(
"INSERT INTO rates(id, valid_from, rate, notification_type) "
f"VALUES('{uuid.uuid4()}', '2022-04-30 23:00:00', 0.0172, 'sms')"
)
def downgrade():
pass
|
Add new SMS rates for 1 May 2022 onwards
|
Add new SMS rates for 1 May 2022 onwards
This change can be merged before the new rates go live, because they
won't be used until the start date.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add new SMS rates for 1 May 2022 onwards
This change can be merged before the new rates go live, because they
won't be used until the start date.
|
"""
Revision ID: 0369_update_sms_rates
Revises: 0368_move_orgs_to_nhs_branding
Create Date: 2022-04-26 09:39:45.260951
"""
import uuid
from alembic import op
revision = '0369_update_sms_rates'
down_revision = '0368_move_orgs_to_nhs_branding'
def upgrade():
op.execute(
"INSERT INTO rates(id, valid_from, rate, notification_type) "
f"VALUES('{uuid.uuid4()}', '2022-04-30 23:00:00', 0.0172, 'sms')"
)
def downgrade():
pass
|
<commit_before><commit_msg>Add new SMS rates for 1 May 2022 onwards
This change can be merged before the new rates go live, because they
won't be used until the start date.<commit_after>
|
"""
Revision ID: 0369_update_sms_rates
Revises: 0368_move_orgs_to_nhs_branding
Create Date: 2022-04-26 09:39:45.260951
"""
import uuid
from alembic import op
revision = '0369_update_sms_rates'
down_revision = '0368_move_orgs_to_nhs_branding'
def upgrade():
op.execute(
"INSERT INTO rates(id, valid_from, rate, notification_type) "
f"VALUES('{uuid.uuid4()}', '2022-04-30 23:00:00', 0.0172, 'sms')"
)
def downgrade():
pass
|
Add new SMS rates for 1 May 2022 onwards
This change can be merged before the new rates go live, because they
won't be used until the start date."""
Revision ID: 0369_update_sms_rates
Revises: 0368_move_orgs_to_nhs_branding
Create Date: 2022-04-26 09:39:45.260951
"""
import uuid
from alembic import op
revision = '0369_update_sms_rates'
down_revision = '0368_move_orgs_to_nhs_branding'
def upgrade():
op.execute(
"INSERT INTO rates(id, valid_from, rate, notification_type) "
f"VALUES('{uuid.uuid4()}', '2022-04-30 23:00:00', 0.0172, 'sms')"
)
def downgrade():
pass
|
<commit_before><commit_msg>Add new SMS rates for 1 May 2022 onwards
This change can be merged before the new rates go live, because they
won't be used until the start date.<commit_after>"""
Revision ID: 0369_update_sms_rates
Revises: 0368_move_orgs_to_nhs_branding
Create Date: 2022-04-26 09:39:45.260951
"""
import uuid
from alembic import op
revision = '0369_update_sms_rates'
down_revision = '0368_move_orgs_to_nhs_branding'
def upgrade():
op.execute(
"INSERT INTO rates(id, valid_from, rate, notification_type) "
f"VALUES('{uuid.uuid4()}', '2022-04-30 23:00:00', 0.0172, 'sms')"
)
def downgrade():
pass
|
|
b168e05cfdf9df1c8d4fa2fa851b75b6a6633708
|
dosagelib/plugins/derideal.py
|
dosagelib/plugins/derideal.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
baseUrl = 'https://www.derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
starter = indirectStarter
def __init__(self, name, sub, first, last=None):
if name == 'Derideal':
super(Derideal, self).__init__(name)
else:
super(Derideal, self).__init__('Derideal/' + name)
self.url = self.baseUrl + sub
self.stripUrl = self.url + '/%s/'
self.firstStripUrl = self.stripUrl % first
self.startUrl = self.firstStripUrl
if last:
self.endOfLife = True
def starter(self):
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod
def getmodules(cls):
return (
cls('Derideal', 'derideal', 'cover-prime'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
cls('TheVoid', 'the-void', 'the-void-cover')
)
|
Add site engine for Derideal
|
Add site engine for Derideal
|
Python
|
mit
|
peterjanes/dosage,webcomics/dosage,peterjanes/dosage,webcomics/dosage
|
Add site engine for Derideal
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
baseUrl = 'https://www.derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
starter = indirectStarter
def __init__(self, name, sub, first, last=None):
if name == 'Derideal':
super(Derideal, self).__init__(name)
else:
super(Derideal, self).__init__('Derideal/' + name)
self.url = self.baseUrl + sub
self.stripUrl = self.url + '/%s/'
self.firstStripUrl = self.stripUrl % first
self.startUrl = self.firstStripUrl
if last:
self.endOfLife = True
def starter(self):
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod
def getmodules(cls):
return (
cls('Derideal', 'derideal', 'cover-prime'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
cls('TheVoid', 'the-void', 'the-void-cover')
)
|
<commit_before><commit_msg>Add site engine for Derideal<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
baseUrl = 'https://www.derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
starter = indirectStarter
def __init__(self, name, sub, first, last=None):
if name == 'Derideal':
super(Derideal, self).__init__(name)
else:
super(Derideal, self).__init__('Derideal/' + name)
self.url = self.baseUrl + sub
self.stripUrl = self.url + '/%s/'
self.firstStripUrl = self.stripUrl % first
self.startUrl = self.firstStripUrl
if last:
self.endOfLife = True
def starter(self):
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod
def getmodules(cls):
return (
cls('Derideal', 'derideal', 'cover-prime'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
cls('TheVoid', 'the-void', 'the-void-cover')
)
|
Add site engine for Derideal# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
baseUrl = 'https://www.derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
starter = indirectStarter
def __init__(self, name, sub, first, last=None):
if name == 'Derideal':
super(Derideal, self).__init__(name)
else:
super(Derideal, self).__init__('Derideal/' + name)
self.url = self.baseUrl + sub
self.stripUrl = self.url + '/%s/'
self.firstStripUrl = self.stripUrl % first
self.startUrl = self.firstStripUrl
if last:
self.endOfLife = True
def starter(self):
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod
def getmodules(cls):
return (
cls('Derideal', 'derideal', 'cover-prime'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
cls('TheVoid', 'the-void', 'the-void-cover')
)
|
<commit_before><commit_msg>Add site engine for Derideal<commit_after># -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
baseUrl = 'https://www.derideal.com/'
imageSearch = '//img[contains(@class, "comic-page")]'
prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
starter = indirectStarter
def __init__(self, name, sub, first, last=None):
if name == 'Derideal':
super(Derideal, self).__init__(name)
else:
super(Derideal, self).__init__('Derideal/' + name)
self.url = self.baseUrl + sub
self.stripUrl = self.url + '/%s/'
self.firstStripUrl = self.stripUrl % first
self.startUrl = self.firstStripUrl
if last:
self.endOfLife = True
def starter(self):
indexPage = self.getPage(self.url)
self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
self.currentChapter = len(self.chapters)
return indirectStarter(self)
def namer(self, imageUrl, pageUrl):
filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
filename = filename.replace('espanol-escape-25', 'escape-26')
filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
filename = filename.replace('hogar-prision', 'home-prison')
filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
filename = filename.replace('sector-de-seguridad', 'security-sector')
filename = 'ch' + str(self.currentChapter) + '-' + filename
if pageUrl in self.chapters:
self.currentChapter -= 1
return filename
@classmethod
def getmodules(cls):
return (
cls('Derideal', 'derideal', 'cover-prime'),
cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
cls('TheVoid', 'the-void', 'the-void-cover')
)
|
|
a28e82121a5dcc68183c338b06264981227f8681
|
examples/example4_multiwf.py
|
examples/example4_multiwf.py
|
'''
An example showing how you can run multiple workflow tasks, from a "Meta workflow" (MetaWF below)
'''
import sciluigi as sl
import luigi
class MetaWF(sl.WorkflowTask):
'''
Meta workflow
'''
def workflow(self):
tasks = []
for r in ['bar', 'tjo', 'hej']:
wf = self.new_task('wf', WF, replacement=r)
tasks.append(wf)
return tasks
class WF(sl.WorkflowTask):
'''
Main workflow, which is run in multiple instances above
'''
replacement = luigi.Parameter()
def workflow(self):
t1 = self.new_task('foowriter', FooWriter)
t2 = self.new_task('foo2bar', Foo2Bar, replacement=self.replacement)
t2.in_foo = t1.out_foo
return t2
class FooWriter(sl.Task):
'''
A dummy task
'''
def out_foo(self):
return sl.TargetInfo(self, 'foo.txt')
def run(self):
self.ex('echo foo > {foo}'.format(
foo=self.out_foo().path))
class Foo2Bar(sl.Task):
'''
Another dummy task
'''
replacement = luigi.Parameter()
in_foo = sl.TargetInfo(None, 'None')
def out_bar(self):
return sl.TargetInfo(self, self.in_foo().path + '.{r}.txt'.format(r=self.replacement))
def run(self):
self.ex('sed "s/foo/{r}/g" {inf} > {outf}'.format(
r=self.replacement,
inf=self.in_foo().path,
outf=self.out_bar().path)
)
# Run as script
if __name__ == '__main__':
sl.run_local()
|
Add example 4, demonstrating how to run meta workflow of multiple workflows
|
Add example 4, demonstrating how to run meta workflow of multiple workflows
|
Python
|
mit
|
pharmbio/sciluigi,pharmbio/sciluigi,samuell/sciluigi
|
Add example 4, demonstrating how to run meta workflow of multiple workflows
|
'''
An example showing how you can run multiple workflow tasks, from a "Meta workflow" (MetaWF below)
'''
import sciluigi as sl
import luigi
class MetaWF(sl.WorkflowTask):
'''
Meta workflow
'''
def workflow(self):
tasks = []
for r in ['bar', 'tjo', 'hej']:
wf = self.new_task('wf', WF, replacement=r)
tasks.append(wf)
return tasks
class WF(sl.WorkflowTask):
'''
Main workflow, which is run in multiple instances above
'''
replacement = luigi.Parameter()
def workflow(self):
t1 = self.new_task('foowriter', FooWriter)
t2 = self.new_task('foo2bar', Foo2Bar, replacement=self.replacement)
t2.in_foo = t1.out_foo
return t2
class FooWriter(sl.Task):
'''
A dummy task
'''
def out_foo(self):
return sl.TargetInfo(self, 'foo.txt')
def run(self):
self.ex('echo foo > {foo}'.format(
foo=self.out_foo().path))
class Foo2Bar(sl.Task):
'''
Another dummy task
'''
replacement = luigi.Parameter()
in_foo = sl.TargetInfo(None, 'None')
def out_bar(self):
return sl.TargetInfo(self, self.in_foo().path + '.{r}.txt'.format(r=self.replacement))
def run(self):
self.ex('sed "s/foo/{r}/g" {inf} > {outf}'.format(
r=self.replacement,
inf=self.in_foo().path,
outf=self.out_bar().path)
)
# Run as script
if __name__ == '__main__':
sl.run_local()
|
<commit_before><commit_msg>Add example 4, demonstrating how to run meta workflow of multiple workflows<commit_after>
|
'''
An example showing how you can run multiple workflow tasks, from a "Meta workflow" (MetaWF below)
'''
import sciluigi as sl
import luigi
class MetaWF(sl.WorkflowTask):
'''
Meta workflow
'''
def workflow(self):
tasks = []
for r in ['bar', 'tjo', 'hej']:
wf = self.new_task('wf', WF, replacement=r)
tasks.append(wf)
return tasks
class WF(sl.WorkflowTask):
'''
Main workflow, which is run in multiple instances above
'''
replacement = luigi.Parameter()
def workflow(self):
t1 = self.new_task('foowriter', FooWriter)
t2 = self.new_task('foo2bar', Foo2Bar, replacement=self.replacement)
t2.in_foo = t1.out_foo
return t2
class FooWriter(sl.Task):
'''
A dummy task
'''
def out_foo(self):
return sl.TargetInfo(self, 'foo.txt')
def run(self):
self.ex('echo foo > {foo}'.format(
foo=self.out_foo().path))
class Foo2Bar(sl.Task):
'''
Another dummy task
'''
replacement = luigi.Parameter()
in_foo = sl.TargetInfo(None, 'None')
def out_bar(self):
return sl.TargetInfo(self, self.in_foo().path + '.{r}.txt'.format(r=self.replacement))
def run(self):
self.ex('sed "s/foo/{r}/g" {inf} > {outf}'.format(
r=self.replacement,
inf=self.in_foo().path,
outf=self.out_bar().path)
)
# Run as script
if __name__ == '__main__':
sl.run_local()
|
Add example 4, demonstrating how to run meta workflow of multiple workflows'''
An example showing how you can run multiple workflow tasks, from a "Meta workflow" (MetaWF below)
'''
import sciluigi as sl
import luigi
class MetaWF(sl.WorkflowTask):
'''
Meta workflow
'''
def workflow(self):
tasks = []
for r in ['bar', 'tjo', 'hej']:
wf = self.new_task('wf', WF, replacement=r)
tasks.append(wf)
return tasks
class WF(sl.WorkflowTask):
'''
Main workflow, which is run in multiple instances above
'''
replacement = luigi.Parameter()
def workflow(self):
t1 = self.new_task('foowriter', FooWriter)
t2 = self.new_task('foo2bar', Foo2Bar, replacement=self.replacement)
t2.in_foo = t1.out_foo
return t2
class FooWriter(sl.Task):
'''
A dummy task
'''
def out_foo(self):
return sl.TargetInfo(self, 'foo.txt')
def run(self):
self.ex('echo foo > {foo}'.format(
foo=self.out_foo().path))
class Foo2Bar(sl.Task):
'''
Another dummy task
'''
replacement = luigi.Parameter()
in_foo = sl.TargetInfo(None, 'None')
def out_bar(self):
return sl.TargetInfo(self, self.in_foo().path + '.{r}.txt'.format(r=self.replacement))
def run(self):
self.ex('sed "s/foo/{r}/g" {inf} > {outf}'.format(
r=self.replacement,
inf=self.in_foo().path,
outf=self.out_bar().path)
)
# Run as script
if __name__ == '__main__':
sl.run_local()
|
<commit_before><commit_msg>Add example 4, demonstrating how to run meta workflow of multiple workflows<commit_after>'''
An example showing how you can run multiple workflow tasks, from a "Meta workflow" (MetaWF below)
'''
import sciluigi as sl
import luigi
class MetaWF(sl.WorkflowTask):
'''
Meta workflow
'''
def workflow(self):
tasks = []
for r in ['bar', 'tjo', 'hej']:
wf = self.new_task('wf', WF, replacement=r)
tasks.append(wf)
return tasks
class WF(sl.WorkflowTask):
'''
Main workflow, which is run in multiple instances above
'''
replacement = luigi.Parameter()
def workflow(self):
t1 = self.new_task('foowriter', FooWriter)
t2 = self.new_task('foo2bar', Foo2Bar, replacement=self.replacement)
t2.in_foo = t1.out_foo
return t2
class FooWriter(sl.Task):
'''
A dummy task
'''
def out_foo(self):
return sl.TargetInfo(self, 'foo.txt')
def run(self):
self.ex('echo foo > {foo}'.format(
foo=self.out_foo().path))
class Foo2Bar(sl.Task):
'''
Another dummy task
'''
replacement = luigi.Parameter()
in_foo = sl.TargetInfo(None, 'None')
def out_bar(self):
return sl.TargetInfo(self, self.in_foo().path + '.{r}.txt'.format(r=self.replacement))
def run(self):
self.ex('sed "s/foo/{r}/g" {inf} > {outf}'.format(
r=self.replacement,
inf=self.in_foo().path,
outf=self.out_bar().path)
)
# Run as script
if __name__ == '__main__':
sl.run_local()
|
|
a73f320e61f7ff937f4be3c69939edf77be60e0e
|
accloudtant/__main__.py
|
accloudtant/__main__.py
|
import csv
if __name__ == "__main__":
usage = []
with open("tests/fixtures/2021/03/S3.csv") as f:
reader = csv.DictReader(f)
for row in reader:
usage.append(row)
print("Simple Storage Service")
for entry in usage:
print(entry)
|
Print list of usage records from S3 CSV
|
Print list of usage records from S3 CSV
|
Python
|
apache-2.0
|
ifosch/accloudtant
|
Print list of usage records from S3 CSV
|
import csv
if __name__ == "__main__":
usage = []
with open("tests/fixtures/2021/03/S3.csv") as f:
reader = csv.DictReader(f)
for row in reader:
usage.append(row)
print("Simple Storage Service")
for entry in usage:
print(entry)
|
<commit_before><commit_msg>Print list of usage records from S3 CSV<commit_after>
|
import csv
if __name__ == "__main__":
usage = []
with open("tests/fixtures/2021/03/S3.csv") as f:
reader = csv.DictReader(f)
for row in reader:
usage.append(row)
print("Simple Storage Service")
for entry in usage:
print(entry)
|
Print list of usage records from S3 CSVimport csv
if __name__ == "__main__":
usage = []
with open("tests/fixtures/2021/03/S3.csv") as f:
reader = csv.DictReader(f)
for row in reader:
usage.append(row)
print("Simple Storage Service")
for entry in usage:
print(entry)
|
<commit_before><commit_msg>Print list of usage records from S3 CSV<commit_after>import csv
if __name__ == "__main__":
usage = []
with open("tests/fixtures/2021/03/S3.csv") as f:
reader = csv.DictReader(f)
for row in reader:
usage.append(row)
print("Simple Storage Service")
for entry in usage:
print(entry)
|
|
c70818bbcb678e6a0ab94389e034c2d59da463f8
|
maker/tests/test_category.py
|
maker/tests/test_category.py
|
from django.test import TestCase
from maker.migrations.default_categories import DEFAULT_CATEGORIES
from maker.models import Category
class CategoryTestCase(TestCase):
def test_pre_install(self):
categories = Category.objects.all()
self.assertEqual(len(DEFAULT_CATEGORIES), len(categories))
for c in categories:
self.assertTrue(c.name in DEFAULT_CATEGORIES)
def test_str(self):
category = Category.objects.get(name=DEFAULT_CATEGORIES[0])
self.assertEqual(category.name, str(category))
|
Add tests for Category model
|
Add tests for Category model
|
Python
|
agpl-3.0
|
fdroidtravis/repomaker,fdroidtravis/repomaker,fdroidtravis/repomaker,fdroidtravis/repomaker
|
Add tests for Category model
|
from django.test import TestCase
from maker.migrations.default_categories import DEFAULT_CATEGORIES
from maker.models import Category
class CategoryTestCase(TestCase):
def test_pre_install(self):
categories = Category.objects.all()
self.assertEqual(len(DEFAULT_CATEGORIES), len(categories))
for c in categories:
self.assertTrue(c.name in DEFAULT_CATEGORIES)
def test_str(self):
category = Category.objects.get(name=DEFAULT_CATEGORIES[0])
self.assertEqual(category.name, str(category))
|
<commit_before><commit_msg>Add tests for Category model<commit_after>
|
from django.test import TestCase
from maker.migrations.default_categories import DEFAULT_CATEGORIES
from maker.models import Category
class CategoryTestCase(TestCase):
def test_pre_install(self):
categories = Category.objects.all()
self.assertEqual(len(DEFAULT_CATEGORIES), len(categories))
for c in categories:
self.assertTrue(c.name in DEFAULT_CATEGORIES)
def test_str(self):
category = Category.objects.get(name=DEFAULT_CATEGORIES[0])
self.assertEqual(category.name, str(category))
|
Add tests for Category modelfrom django.test import TestCase
from maker.migrations.default_categories import DEFAULT_CATEGORIES
from maker.models import Category
class CategoryTestCase(TestCase):
def test_pre_install(self):
categories = Category.objects.all()
self.assertEqual(len(DEFAULT_CATEGORIES), len(categories))
for c in categories:
self.assertTrue(c.name in DEFAULT_CATEGORIES)
def test_str(self):
category = Category.objects.get(name=DEFAULT_CATEGORIES[0])
self.assertEqual(category.name, str(category))
|
<commit_before><commit_msg>Add tests for Category model<commit_after>from django.test import TestCase
from maker.migrations.default_categories import DEFAULT_CATEGORIES
from maker.models import Category
class CategoryTestCase(TestCase):
def test_pre_install(self):
categories = Category.objects.all()
self.assertEqual(len(DEFAULT_CATEGORIES), len(categories))
for c in categories:
self.assertTrue(c.name in DEFAULT_CATEGORIES)
def test_str(self):
category = Category.objects.get(name=DEFAULT_CATEGORIES[0])
self.assertEqual(category.name, str(category))
|
|
6f75300037254f51f1512a271bf7850a4bc0a8f8
|
djangospam/cookie/urls.py
|
djangospam/cookie/urls.py
|
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
|
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
try:
from django.conf.urls import patterns
except ImportError:
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
|
Add support for Django 1.4 and up
|
Add support for Django 1.4 and up
* Module django.conf.urls.defaults has been moved to django.conf.urls in
version 1.4.
Commit references issue #3.
|
Python
|
bsd-2-clause
|
leandroarndt/djangospam,leandroarndt/djangospam
|
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)Add support for Django 1.4 and up
* Module django.conf.urls.defaults has been moved to django.conf.urls in
version 1.4.
Commit references issue #3.
|
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
try:
from django.conf.urls import patterns
except ImportError:
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
|
<commit_before># -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)<commit_msg>Add support for Django 1.4 and up
* Module django.conf.urls.defaults has been moved to django.conf.urls in
version 1.4.
Commit references issue #3.<commit_after>
|
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
try:
from django.conf.urls import patterns
except ImportError:
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
|
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
Add support for Django 1.4 and up
* Module django.conf.urls.defaults has been moved to django.conf.urls in
version 1.4.
Commit references issue #3.
# -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
try:
from django.conf.urls import patterns
except ImportError:
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
|
<commit_before># -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)<commit_msg>Add support for Django 1.4 and up
* Module django.conf.urls.defaults has been moved to django.conf.urls in
version 1.4.
Commit references issue #3.<commit_after># -*- coding: utf-8 -*-
"""URL for setting SPAM value to the `djangospam.cookie` cookie.
You must also add `(r"^somewhere/", include("djangospam.cookie.urls")`
to your url patterns (usually in your root urls.conf; `somewhere`
may be any path, except the one used for true posts).
"""
try:
from django.conf.urls import patterns
except ImportError:
from django.conf.urls.defaults import patterns
urlpatterns = patterns('',
(r'^post$', 'djangospam.cookie.views.spammer_view'),)
|
42fdc9f9242df112046763560011336869d3efa1
|
sort_includes.py
|
sort_includes.py
|
#!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
import re
import sys
import tempfile
def sort_includes(f):
lines = f.readlines()
look_for_api_header = f.name[-4:] == '.cpp'
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
project_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if headers_begin == 0:
headers_begin = i
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if header.startswith('<'):
system_headers.append(header)
continue
if header.startswith('"llvm/') or header.startswith('"clang/'):
project_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if headers_begin != 0:
break
if l.startswith('//') or l.startswith('#define'):
continue
break
if headers_begin == 0:
return
local_headers.sort()
project_headers.sort()
system_headers.sort()
headers = api_headers + local_headers + project_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
#for l in lines[headers_begin:headers_end]:
# print l.rstrip()
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
|
Add a completely hack-ish tool to sort includes according to the coding standards.
|
Add a completely hack-ish tool to sort includes according to the coding
standards.
I am a terrible Python programmer. Patches more the welcome. Please tell
me how this should look if it should look differently. It's just a tiny
little script so it didn't make sense to go through pre-commit review,
especially as someone who actually knows python may want to just rip it
apart and do it The Right Way.
I will be preparing a commit shortly that uses this script to
canonicalize *all* of the #include lines in LLVM. Really, all of them.
git-svn-id: a4a6f32337ebd29ad4763b423022f00f68d1c7b7@169125 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
bsd-3-clause
|
lodyagin/bare_cxx,lodyagin/bare_cxx,lodyagin/bare_cxx,lodyagin/bare_cxx,lodyagin/bare_cxx
|
Add a completely hack-ish tool to sort includes according to the coding
standards.
I am a terrible Python programmer. Patches more the welcome. Please tell
me how this should look if it should look differently. It's just a tiny
little script so it didn't make sense to go through pre-commit review,
especially as someone who actually knows python may want to just rip it
apart and do it The Right Way.
I will be preparing a commit shortly that uses this script to
canonicalize *all* of the #include lines in LLVM. Really, all of them.
git-svn-id: a4a6f32337ebd29ad4763b423022f00f68d1c7b7@169125 91177308-0d34-0410-b5e6-96231b3b80d8
|
#!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
import re
import sys
import tempfile
def sort_includes(f):
lines = f.readlines()
look_for_api_header = f.name[-4:] == '.cpp'
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
project_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if headers_begin == 0:
headers_begin = i
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if header.startswith('<'):
system_headers.append(header)
continue
if header.startswith('"llvm/') or header.startswith('"clang/'):
project_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if headers_begin != 0:
break
if l.startswith('//') or l.startswith('#define'):
continue
break
if headers_begin == 0:
return
local_headers.sort()
project_headers.sort()
system_headers.sort()
headers = api_headers + local_headers + project_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
#for l in lines[headers_begin:headers_end]:
# print l.rstrip()
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a completely hack-ish tool to sort includes according to the coding
standards.
I am a terrible Python programmer. Patches more the welcome. Please tell
me how this should look if it should look differently. It's just a tiny
little script so it didn't make sense to go through pre-commit review,
especially as someone who actually knows python may want to just rip it
apart and do it The Right Way.
I will be preparing a commit shortly that uses this script to
canonicalize *all* of the #include lines in LLVM. Really, all of them.
git-svn-id: a4a6f32337ebd29ad4763b423022f00f68d1c7b7@169125 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
#!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
import re
import sys
import tempfile
def sort_includes(f):
lines = f.readlines()
look_for_api_header = f.name[-4:] == '.cpp'
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
project_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if headers_begin == 0:
headers_begin = i
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if header.startswith('<'):
system_headers.append(header)
continue
if header.startswith('"llvm/') or header.startswith('"clang/'):
project_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if headers_begin != 0:
break
if l.startswith('//') or l.startswith('#define'):
continue
break
if headers_begin == 0:
return
local_headers.sort()
project_headers.sort()
system_headers.sort()
headers = api_headers + local_headers + project_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
#for l in lines[headers_begin:headers_end]:
# print l.rstrip()
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
|
Add a completely hack-ish tool to sort includes according to the coding
standards.
I am a terrible Python programmer. Patches more the welcome. Please tell
me how this should look if it should look differently. It's just a tiny
little script so it didn't make sense to go through pre-commit review,
especially as someone who actually knows python may want to just rip it
apart and do it The Right Way.
I will be preparing a commit shortly that uses this script to
canonicalize *all* of the #include lines in LLVM. Really, all of them.
git-svn-id: a4a6f32337ebd29ad4763b423022f00f68d1c7b7@169125 91177308-0d34-0410-b5e6-96231b3b80d8
#!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
import re
import sys
import tempfile
def sort_includes(f):
lines = f.readlines()
look_for_api_header = f.name[-4:] == '.cpp'
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
project_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if headers_begin == 0:
headers_begin = i
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if header.startswith('<'):
system_headers.append(header)
continue
if header.startswith('"llvm/') or header.startswith('"clang/'):
project_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if headers_begin != 0:
break
if l.startswith('//') or l.startswith('#define'):
continue
break
if headers_begin == 0:
return
local_headers.sort()
project_headers.sort()
system_headers.sort()
headers = api_headers + local_headers + project_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
#for l in lines[headers_begin:headers_end]:
# print l.rstrip()
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a completely hack-ish tool to sort includes according to the coding
standards.
I am a terrible Python programmer. Patches more the welcome. Please tell
me how this should look if it should look differently. It's just a tiny
little script so it didn't make sense to go through pre-commit review,
especially as someone who actually knows python may want to just rip it
apart and do it The Right Way.
I will be preparing a commit shortly that uses this script to
canonicalize *all* of the #include lines in LLVM. Really, all of them.
git-svn-id: a4a6f32337ebd29ad4763b423022f00f68d1c7b7@169125 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>#!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
import re
import sys
import tempfile
def sort_includes(f):
lines = f.readlines()
look_for_api_header = f.name[-4:] == '.cpp'
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
project_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if headers_begin == 0:
headers_begin = i
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if header.startswith('<'):
system_headers.append(header)
continue
if header.startswith('"llvm/') or header.startswith('"clang/'):
project_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if headers_begin != 0:
break
if l.startswith('//') or l.startswith('#define'):
continue
break
if headers_begin == 0:
return
local_headers.sort()
project_headers.sort()
system_headers.sort()
headers = api_headers + local_headers + project_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
#for l in lines[headers_begin:headers_end]:
# print l.rstrip()
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
|
|
12a5ed54c39dcabbf72c0a80b1e2ac6f3e9a576c
|
synoptic-to-org.py
|
synoptic-to-org.py
|
import sys
import re
def import_file(lines):
tags_label = "TAGS: "
separator = 60*"-"
idx = 0
while idx < len(lines):
assert lines[idx].startswith(tags_label)
tags = lines[idx][len(tags_label):].split(",")
tags = [t.strip() for t in tags if t.strip()]
idx += 1
body = []
while idx < len(lines) and not lines[idx].startswith(separator):
body.append(lines[idx])
idx += 1
idx += 1 # skip separator
import pypandoc
body = "\n".join(body)
body = pypandoc.convert(body, "org", format="markdown_phpextra")
if tags:
lineend = body.find("\n")
if lineend == -1:
lineend = len(body)
tags_str = " " + ":%s:" % (":".join(tags))
body = body[:lineend] + tags_str + body[lineend:]
body = "** " + re.sub(r"^\*+ ", "", body)
print(body)
def main():
with open(sys.argv[1], "r") as inf:
lines = list(inf)
import_file(lines)
if __name__ == "__main__":
main()
|
Add org mode export script
|
Add org mode export script
|
Python
|
mit
|
inducer/synoptic,inducer/synoptic,inducer/synoptic
|
Add org mode export script
|
import sys
import re
def import_file(lines):
tags_label = "TAGS: "
separator = 60*"-"
idx = 0
while idx < len(lines):
assert lines[idx].startswith(tags_label)
tags = lines[idx][len(tags_label):].split(",")
tags = [t.strip() for t in tags if t.strip()]
idx += 1
body = []
while idx < len(lines) and not lines[idx].startswith(separator):
body.append(lines[idx])
idx += 1
idx += 1 # skip separator
import pypandoc
body = "\n".join(body)
body = pypandoc.convert(body, "org", format="markdown_phpextra")
if tags:
lineend = body.find("\n")
if lineend == -1:
lineend = len(body)
tags_str = " " + ":%s:" % (":".join(tags))
body = body[:lineend] + tags_str + body[lineend:]
body = "** " + re.sub(r"^\*+ ", "", body)
print(body)
def main():
with open(sys.argv[1], "r") as inf:
lines = list(inf)
import_file(lines)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add org mode export script<commit_after>
|
import sys
import re
def import_file(lines):
tags_label = "TAGS: "
separator = 60*"-"
idx = 0
while idx < len(lines):
assert lines[idx].startswith(tags_label)
tags = lines[idx][len(tags_label):].split(",")
tags = [t.strip() for t in tags if t.strip()]
idx += 1
body = []
while idx < len(lines) and not lines[idx].startswith(separator):
body.append(lines[idx])
idx += 1
idx += 1 # skip separator
import pypandoc
body = "\n".join(body)
body = pypandoc.convert(body, "org", format="markdown_phpextra")
if tags:
lineend = body.find("\n")
if lineend == -1:
lineend = len(body)
tags_str = " " + ":%s:" % (":".join(tags))
body = body[:lineend] + tags_str + body[lineend:]
body = "** " + re.sub(r"^\*+ ", "", body)
print(body)
def main():
with open(sys.argv[1], "r") as inf:
lines = list(inf)
import_file(lines)
if __name__ == "__main__":
main()
|
Add org mode export script
import sys
import re
def import_file(lines):
tags_label = "TAGS: "
separator = 60*"-"
idx = 0
while idx < len(lines):
assert lines[idx].startswith(tags_label)
tags = lines[idx][len(tags_label):].split(",")
tags = [t.strip() for t in tags if t.strip()]
idx += 1
body = []
while idx < len(lines) and not lines[idx].startswith(separator):
body.append(lines[idx])
idx += 1
idx += 1 # skip separator
import pypandoc
body = "\n".join(body)
body = pypandoc.convert(body, "org", format="markdown_phpextra")
if tags:
lineend = body.find("\n")
if lineend == -1:
lineend = len(body)
tags_str = " " + ":%s:" % (":".join(tags))
body = body[:lineend] + tags_str + body[lineend:]
body = "** " + re.sub(r"^\*+ ", "", body)
print(body)
def main():
with open(sys.argv[1], "r") as inf:
lines = list(inf)
import_file(lines)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add org mode export script<commit_after>import sys
import re
def import_file(lines):
tags_label = "TAGS: "
separator = 60*"-"
idx = 0
while idx < len(lines):
assert lines[idx].startswith(tags_label)
tags = lines[idx][len(tags_label):].split(",")
tags = [t.strip() for t in tags if t.strip()]
idx += 1
body = []
while idx < len(lines) and not lines[idx].startswith(separator):
body.append(lines[idx])
idx += 1
idx += 1 # skip separator
import pypandoc
body = "\n".join(body)
body = pypandoc.convert(body, "org", format="markdown_phpextra")
if tags:
lineend = body.find("\n")
if lineend == -1:
lineend = len(body)
tags_str = " " + ":%s:" % (":".join(tags))
body = body[:lineend] + tags_str + body[lineend:]
body = "** " + re.sub(r"^\*+ ", "", body)
print(body)
def main():
with open(sys.argv[1], "r") as inf:
lines = list(inf)
import_file(lines)
if __name__ == "__main__":
main()
|
|
2a0ed84f9ce2077d352c0827c2dd5fac514cb91d
|
setup.py
|
setup.py
|
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
Add py3.3 to trove classifiers
|
Add py3.3 to trove classifiers
|
Python
|
mit
|
wintersandroid/tvrenamr,ghickman/tvrenamr
|
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
Add py3.3 to trove classifiers
|
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
<commit_before>import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
<commit_msg>Add py3.3 to trove classifiers<commit_after>
|
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
Add py3.3 to trove classifiers
import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
<commit_before>import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
<commit_msg>Add py3.3 to trove classifiers<commit_after>import multiprocessing # stop tests breaking tox
from setuptools import setup
import tvrenamr
requires = ('pyyaml', 'requests')
packages = ('tvrenamr',)
setup_requires = ('minimock', 'mock', 'nose', 'pyyaml')
setup(
name = tvrenamr.__title__,
version = tvrenamr.__version__,
description = 'Rename tv show files using online databases',
long_description = open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read(),
author = tvrenamr.__author__,
author_email = 'george@ghickman.co.uk',
url = 'http://tvrenamr.info',
license = open('LICENSE').read(),
packages = packages,
entry_points = {'console_scripts': ['tvr = tvrenamr.frontend:run']},
classifiers = (
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English'
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Multimedia',
'Topic :: Utilities',
),
install_requires = requires,
setup_requires=setup_requires,
test_suite='nose.collector',
)
|
f6ee98e4ed4529b0d699f3cf2d107a1ed8faa6f0
|
designate/backend/impl_powerdns/migrate_repo/versions/012_records_drop_duped_index.py
|
designate/backend/impl_powerdns/migrate_repo/versions/012_records_drop_duped_index.py
|
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
# Backport of 017_records_drop_duped_index.py to Kilo
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
Remove duplicated index on table 'records' of pDNS backend
|
Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980
|
Python
|
apache-2.0
|
kiall/designate-py3,cneill/designate,kiall/designate-py3,ionrock/designate,tonyli71/designate,muraliselva10/designate,cneill/designate,grahamhayes/designate,kiall/designate-py3,ramsateesh/designate,cneill/designate,cneill/designate-testing,tonyli71/designate,kiall/designate-py3,openstack/designate,openstack/designate,muraliselva10/designate,cneill/designate-testing,kiall/designate-py3,cneill/designate-testing,grahamhayes/designate,ionrock/designate,grahamhayes/designate,ramsateesh/designate,ramsateesh/designate,muraliselva10/designate,ionrock/designate,openstack/designate,cneill/designate,cneill/designate,tonyli71/designate
|
Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980
|
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
# Backport of 017_records_drop_duped_index.py to Kilo
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
<commit_before><commit_msg>Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980<commit_after>
|
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
# Backport of 017_records_drop_duped_index.py to Kilo
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980
# Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
# Backport of 017_records_drop_duped_index.py to Kilo
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
<commit_before><commit_msg>Remove duplicated index on table 'records' of pDNS backend
Remove duplicated index `rec_name_index` on table 'records' of pDNS backend.
Change-Id: I9e8723b464522a588f0e0ef6ff261b71609b0726
Closes-Bug: 1446980<commit_after># Copyright 2015 NetEase, Inc.
#
# Author: Zhang Gengyuan <stanzgy@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Index, Table
meta = MetaData()
# Backport of 017_records_drop_duped_index.py to Kilo
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.drop()
def downgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
name_idx = Index('rec_name_index',
records_table.c.name)
name_idx.create()
|
|
d6ef33d682008bbdbb00117266670607c4f66056
|
choosealicense/test/test_show.py
|
choosealicense/test/test_show.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license show` function
"""
from click.testing import CliRunner
from choosealicense.main import show
def test_show_all_the_licenses():
runner = CliRunner()
result = runner.invoke(show)
it_should_be = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, isc, '
'lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
assert result.exit_code == 0
assert result.output.strip() == it_should_be
|
Add test for `license show` function
|
Add test for `license show` function
|
Python
|
mit
|
lord63/choosealicense-cli
|
Add test for `license show` function
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license show` function
"""
from click.testing import CliRunner
from choosealicense.main import show
def test_show_all_the_licenses():
runner = CliRunner()
result = runner.invoke(show)
it_should_be = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, isc, '
'lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
assert result.exit_code == 0
assert result.output.strip() == it_should_be
|
<commit_before><commit_msg>Add test for `license show` function<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license show` function
"""
from click.testing import CliRunner
from choosealicense.main import show
def test_show_all_the_licenses():
runner = CliRunner()
result = runner.invoke(show)
it_should_be = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, isc, '
'lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
assert result.exit_code == 0
assert result.output.strip() == it_should_be
|
Add test for `license show` function
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license show` function
"""
from click.testing import CliRunner
from choosealicense.main import show
def test_show_all_the_licenses():
runner = CliRunner()
result = runner.invoke(show)
it_should_be = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, isc, '
'lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
assert result.exit_code == 0
assert result.output.strip() == it_should_be
|
<commit_before><commit_msg>Add test for `license show` function<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license show` function
"""
from click.testing import CliRunner
from choosealicense.main import show
def test_show_all_the_licenses():
runner = CliRunner()
result = runner.invoke(show)
it_should_be = ('agpl-3.0, apache-2.0, artistic-2.0, bsd-2-clause, '
'bsd-3-clause, cc0-1.0, epl-1.0, gpl-2.0, gpl-3.0, isc, '
'lgpl-2.1, lgpl-3.0, mit, mpl-2.0, unlicense')
assert result.exit_code == 0
assert result.output.strip() == it_should_be
|
|
6a6f7a6c6abe78094eb69b447b1b60960bf855ed
|
animation.py
|
animation.py
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from numpy.random import randn
from matplotlib import cm
from datetime import datetime as dt
from Bedau.Evolution import Evolution
# First set up the figure, the axis, and the plot element we want to animate
#
world_size = 128
pop_size = 100
mutation_rate = 0.01
meta_mutation = 0.66
meta_mutation_range = 0.0025 # from paper
evol = Evolution(world_size=world_size, pop_size=pop_size, mutation_rate=mutation_rate,
meta_mutation=meta_mutation, meta_mutation_range=meta_mutation_range)
evol.iterate(50)
fig, ax = plt.subplots()
ax = plt.axes(xlim=(0, 128), ylim=(0, 128))
line1 = ax.imshow(evol.history[0][0], shape=(128, 128),
interpolation='nearest', cmap=cm.coolwarm)
line2 = ax.scatter([], [], s=10, c='red')
def init():
line1.set_array([[], []])
line2.set_offsets([])
return line1, line2
def animate(i):
line1.set_array(evol.history[i][0])
line2.set_offsets(evol.history[i][1])
return line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(
fig, animate, frames=len(evol.history), interval=300, blit=True, init_func=init, repeat=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=2, extra_args=['-vcodec', 'libx264'])
name = dt.now().strftime('%Y-%m-%d_%H-%M') + '.mp4'
anim.save(name, fps=5, dpi=300, extra_args=['-vcodec', 'libx264'])
# lt.show()
|
Add file to experiment with plotting of world
|
Add file to experiment with plotting of world
|
Python
|
mit
|
LapoFrati/GeneticAlgorithms
|
Add file to experiment with plotting of world
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from numpy.random import randn
from matplotlib import cm
from datetime import datetime as dt
from Bedau.Evolution import Evolution
# First set up the figure, the axis, and the plot element we want to animate
#
world_size = 128
pop_size = 100
mutation_rate = 0.01
meta_mutation = 0.66
meta_mutation_range = 0.0025 # from paper
evol = Evolution(world_size=world_size, pop_size=pop_size, mutation_rate=mutation_rate,
meta_mutation=meta_mutation, meta_mutation_range=meta_mutation_range)
evol.iterate(50)
fig, ax = plt.subplots()
ax = plt.axes(xlim=(0, 128), ylim=(0, 128))
line1 = ax.imshow(evol.history[0][0], shape=(128, 128),
interpolation='nearest', cmap=cm.coolwarm)
line2 = ax.scatter([], [], s=10, c='red')
def init():
line1.set_array([[], []])
line2.set_offsets([])
return line1, line2
def animate(i):
line1.set_array(evol.history[i][0])
line2.set_offsets(evol.history[i][1])
return line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(
fig, animate, frames=len(evol.history), interval=300, blit=True, init_func=init, repeat=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=2, extra_args=['-vcodec', 'libx264'])
name = dt.now().strftime('%Y-%m-%d_%H-%M') + '.mp4'
anim.save(name, fps=5, dpi=300, extra_args=['-vcodec', 'libx264'])
# lt.show()
|
<commit_before><commit_msg>Add file to experiment with plotting of world<commit_after>
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from numpy.random import randn
from matplotlib import cm
from datetime import datetime as dt
from Bedau.Evolution import Evolution
# First set up the figure, the axis, and the plot element we want to animate
#
world_size = 128
pop_size = 100
mutation_rate = 0.01
meta_mutation = 0.66
meta_mutation_range = 0.0025 # from paper
evol = Evolution(world_size=world_size, pop_size=pop_size, mutation_rate=mutation_rate,
meta_mutation=meta_mutation, meta_mutation_range=meta_mutation_range)
evol.iterate(50)
fig, ax = plt.subplots()
ax = plt.axes(xlim=(0, 128), ylim=(0, 128))
line1 = ax.imshow(evol.history[0][0], shape=(128, 128),
interpolation='nearest', cmap=cm.coolwarm)
line2 = ax.scatter([], [], s=10, c='red')
def init():
line1.set_array([[], []])
line2.set_offsets([])
return line1, line2
def animate(i):
line1.set_array(evol.history[i][0])
line2.set_offsets(evol.history[i][1])
return line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(
fig, animate, frames=len(evol.history), interval=300, blit=True, init_func=init, repeat=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=2, extra_args=['-vcodec', 'libx264'])
name = dt.now().strftime('%Y-%m-%d_%H-%M') + '.mp4'
anim.save(name, fps=5, dpi=300, extra_args=['-vcodec', 'libx264'])
# lt.show()
|
Add file to experiment with plotting of world
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from numpy.random import randn
from matplotlib import cm
from datetime import datetime as dt
from Bedau.Evolution import Evolution
# First set up the figure, the axis, and the plot element we want to animate
#
world_size = 128
pop_size = 100
mutation_rate = 0.01
meta_mutation = 0.66
meta_mutation_range = 0.0025 # from paper
evol = Evolution(world_size=world_size, pop_size=pop_size, mutation_rate=mutation_rate,
meta_mutation=meta_mutation, meta_mutation_range=meta_mutation_range)
evol.iterate(50)
fig, ax = plt.subplots()
ax = plt.axes(xlim=(0, 128), ylim=(0, 128))
line1 = ax.imshow(evol.history[0][0], shape=(128, 128),
interpolation='nearest', cmap=cm.coolwarm)
line2 = ax.scatter([], [], s=10, c='red')
def init():
line1.set_array([[], []])
line2.set_offsets([])
return line1, line2
def animate(i):
line1.set_array(evol.history[i][0])
line2.set_offsets(evol.history[i][1])
return line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(
fig, animate, frames=len(evol.history), interval=300, blit=True, init_func=init, repeat=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=2, extra_args=['-vcodec', 'libx264'])
name = dt.now().strftime('%Y-%m-%d_%H-%M') + '.mp4'
anim.save(name, fps=5, dpi=300, extra_args=['-vcodec', 'libx264'])
# lt.show()
|
<commit_before><commit_msg>Add file to experiment with plotting of world<commit_after>import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from numpy.random import randn
from matplotlib import cm
from datetime import datetime as dt
from Bedau.Evolution import Evolution
# First set up the figure, the axis, and the plot element we want to animate
#
world_size = 128
pop_size = 100
mutation_rate = 0.01
meta_mutation = 0.66
meta_mutation_range = 0.0025 # from paper
evol = Evolution(world_size=world_size, pop_size=pop_size, mutation_rate=mutation_rate,
meta_mutation=meta_mutation, meta_mutation_range=meta_mutation_range)
evol.iterate(50)
fig, ax = plt.subplots()
ax = plt.axes(xlim=(0, 128), ylim=(0, 128))
line1 = ax.imshow(evol.history[0][0], shape=(128, 128),
interpolation='nearest', cmap=cm.coolwarm)
line2 = ax.scatter([], [], s=10, c='red')
def init():
line1.set_array([[], []])
line2.set_offsets([])
return line1, line2
def animate(i):
line1.set_array(evol.history[i][0])
line2.set_offsets(evol.history[i][1])
return line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(
fig, animate, frames=len(evol.history), interval=300, blit=True, init_func=init, repeat=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=2, extra_args=['-vcodec', 'libx264'])
name = dt.now().strftime('%Y-%m-%d_%H-%M') + '.mp4'
anim.save(name, fps=5, dpi=300, extra_args=['-vcodec', 'libx264'])
# lt.show()
|
|
37e0d6ae04da6b127e214afba1d8d1adabfa707e
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_admin.py
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_admin.py
|
import pytest
from django.urls import reverse
from {{ cookiecutter.project_slug }}.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.first()
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
Add test for the UserAdmin
|
Add test for the UserAdmin
|
Python
|
bsd-3-clause
|
pydanny/cookiecutter-django,ryankanno/cookiecutter-django,ryankanno/cookiecutter-django,trungdong/cookiecutter-django,ryankanno/cookiecutter-django,pydanny/cookiecutter-django,trungdong/cookiecutter-django,ryankanno/cookiecutter-django,pydanny/cookiecutter-django,trungdong/cookiecutter-django,trungdong/cookiecutter-django,pydanny/cookiecutter-django
|
Add test for the UserAdmin
|
import pytest
from django.urls import reverse
from {{ cookiecutter.project_slug }}.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.first()
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
<commit_before><commit_msg>Add test for the UserAdmin<commit_after>
|
import pytest
from django.urls import reverse
from {{ cookiecutter.project_slug }}.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.first()
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
Add test for the UserAdmin
import pytest
from django.urls import reverse
from {{ cookiecutter.project_slug }}.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.first()
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
<commit_before><commit_msg>Add test for the UserAdmin<commit_after>import pytest
from django.urls import reverse
from {{ cookiecutter.project_slug }}.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.first()
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
|
38abc2d155492e1dadb507a63a484f2147118163
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(
name='python-evrythng',
version='0.1',
packages=['evrythng', 'evrythng.entities'],
package_dir={'': 'src'},
url='https://github.com/GooeeIOT/python-evrythng',
license='MIT',
author='Lyle Scott, III',
author_email='lyle@digitalfoo.net',
description='A Python wrapper about the Evrythng REST API.'
)
|
Create a pip installable project
|
Create a pip installable project
|
Python
|
mit
|
GooeeIOT/python-evrythng
|
Create a pip installable project
|
from distutils.core import setup
setup(
name='python-evrythng',
version='0.1',
packages=['evrythng', 'evrythng.entities'],
package_dir={'': 'src'},
url='https://github.com/GooeeIOT/python-evrythng',
license='MIT',
author='Lyle Scott, III',
author_email='lyle@digitalfoo.net',
description='A Python wrapper about the Evrythng REST API.'
)
|
<commit_before><commit_msg>Create a pip installable project<commit_after>
|
from distutils.core import setup
setup(
name='python-evrythng',
version='0.1',
packages=['evrythng', 'evrythng.entities'],
package_dir={'': 'src'},
url='https://github.com/GooeeIOT/python-evrythng',
license='MIT',
author='Lyle Scott, III',
author_email='lyle@digitalfoo.net',
description='A Python wrapper about the Evrythng REST API.'
)
|
Create a pip installable project
from distutils.core import setup
setup(
name='python-evrythng',
version='0.1',
packages=['evrythng', 'evrythng.entities'],
package_dir={'': 'src'},
url='https://github.com/GooeeIOT/python-evrythng',
license='MIT',
author='Lyle Scott, III',
author_email='lyle@digitalfoo.net',
description='A Python wrapper about the Evrythng REST API.'
)
|
<commit_before><commit_msg>Create a pip installable project<commit_after>from distutils.core import setup
setup(
name='python-evrythng',
version='0.1',
packages=['evrythng', 'evrythng.entities'],
package_dir={'': 'src'},
url='https://github.com/GooeeIOT/python-evrythng',
license='MIT',
author='Lyle Scott, III',
author_email='lyle@digitalfoo.net',
description='A Python wrapper about the Evrythng REST API.'
)
|
|
f38c8fe1736e870a29f8c44a20c903f65579976b
|
taggert/tdata.py
|
taggert/tdata.py
|
from gi.repository import GObject
class TData(GObject.GObject):
imagedir = GObject.property(type=str)
lasttrackfolder = GObject.property(type=str)
tracktimezone = GObject.property(type=str)
alwaysthistimezone = GObject.property(type=bool, default=False)
markersize = GObject.property(type=int)
trackwidth = GObject.property(type=int)
def __init__(self):
GObject.GObject.__init__(self)
def connect_signals(self, handlers):
for prop, handler in handlers.items():
self.connect("notify::%s" % prop, handler)
GObject.type_register(TData)
|
Add TData class implementing GObject.GObject, meant for runtime data storage
|
Add TData class implementing GObject.GObject, meant for runtime data storage
|
Python
|
apache-2.0
|
tinuzz/taggert
|
Add TData class implementing GObject.GObject, meant for runtime data storage
|
from gi.repository import GObject
class TData(GObject.GObject):
imagedir = GObject.property(type=str)
lasttrackfolder = GObject.property(type=str)
tracktimezone = GObject.property(type=str)
alwaysthistimezone = GObject.property(type=bool, default=False)
markersize = GObject.property(type=int)
trackwidth = GObject.property(type=int)
def __init__(self):
GObject.GObject.__init__(self)
def connect_signals(self, handlers):
for prop, handler in handlers.items():
self.connect("notify::%s" % prop, handler)
GObject.type_register(TData)
|
<commit_before><commit_msg>Add TData class implementing GObject.GObject, meant for runtime data storage<commit_after>
|
from gi.repository import GObject
class TData(GObject.GObject):
imagedir = GObject.property(type=str)
lasttrackfolder = GObject.property(type=str)
tracktimezone = GObject.property(type=str)
alwaysthistimezone = GObject.property(type=bool, default=False)
markersize = GObject.property(type=int)
trackwidth = GObject.property(type=int)
def __init__(self):
GObject.GObject.__init__(self)
def connect_signals(self, handlers):
for prop, handler in handlers.items():
self.connect("notify::%s" % prop, handler)
GObject.type_register(TData)
|
Add TData class implementing GObject.GObject, meant for runtime data storage
from gi.repository import GObject
class TData(GObject.GObject):
imagedir = GObject.property(type=str)
lasttrackfolder = GObject.property(type=str)
tracktimezone = GObject.property(type=str)
alwaysthistimezone = GObject.property(type=bool, default=False)
markersize = GObject.property(type=int)
trackwidth = GObject.property(type=int)
def __init__(self):
GObject.GObject.__init__(self)
def connect_signals(self, handlers):
for prop, handler in handlers.items():
self.connect("notify::%s" % prop, handler)
GObject.type_register(TData)
|
<commit_before><commit_msg>Add TData class implementing GObject.GObject, meant for runtime data storage<commit_after>from gi.repository import GObject
class TData(GObject.GObject):
imagedir = GObject.property(type=str)
lasttrackfolder = GObject.property(type=str)
tracktimezone = GObject.property(type=str)
alwaysthistimezone = GObject.property(type=bool, default=False)
markersize = GObject.property(type=int)
trackwidth = GObject.property(type=int)
def __init__(self):
GObject.GObject.__init__(self)
def connect_signals(self, handlers):
for prop, handler in handlers.items():
self.connect("notify::%s" % prop, handler)
GObject.type_register(TData)
|
|
c8df2f29359661c538cf07afe8f639c9c6159d00
|
py/construct-the-rectangle.py
|
py/construct-the-rectangle.py
|
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
ans = None
W = 1
while W * W <= area:
if area % W == 0:
ans = [area / W, W]
W += 1
return ans
|
Add py solution for 492. Construct the Rectangle
|
Add py solution for 492. Construct the Rectangle
492. Construct the Rectangle: https://leetcode.com/problems/construct-the-rectangle/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 492. Construct the Rectangle
492. Construct the Rectangle: https://leetcode.com/problems/construct-the-rectangle/
|
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
ans = None
W = 1
while W * W <= area:
if area % W == 0:
ans = [area / W, W]
W += 1
return ans
|
<commit_before><commit_msg>Add py solution for 492. Construct the Rectangle
492. Construct the Rectangle: https://leetcode.com/problems/construct-the-rectangle/<commit_after>
|
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
ans = None
W = 1
while W * W <= area:
if area % W == 0:
ans = [area / W, W]
W += 1
return ans
|
Add py solution for 492. Construct the Rectangle
492. Construct the Rectangle: https://leetcode.com/problems/construct-the-rectangle/
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
ans = None
W = 1
while W * W <= area:
if area % W == 0:
ans = [area / W, W]
W += 1
return ans
|
<commit_before><commit_msg>Add py solution for 492. Construct the Rectangle
492. Construct the Rectangle: https://leetcode.com/problems/construct-the-rectangle/<commit_after>class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
ans = None
W = 1
while W * W <= area:
if area % W == 0:
ans = [area / W, W]
W += 1
return ans
|
|
29c6d5b78e06e8487d9eccb47070b0fb36cbb821
|
scenarios/dindind_execute.py
|
scenarios/dindind_execute.py
|
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Prepares for nested docker, and executes a command."""
# TODO(Q-Lee): check the necessity of this once MountPropagation is available in
# prow: https://github.com/kubernetes/kubernetes/pull/59252
import argparse
import os
import subprocess
import sys
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def main(envs, cmd):
"""Make important mounts r-shared, then run script and verify it exits 0."""
check("mount", "--make-rshared", "/lib/modules")
check("mount", "--make-rshared", "/sys")
for env in envs:
key, val = env.split('=', 1)
print >>sys.stderr, '%s=%s' % (key, val)
os.environ[key] = val
if not cmd:
raise ValueError(cmd)
check(*cmd)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--env', default=[], action='append')
PARSER.add_argument('cmd', nargs=1)
PARSER.add_argument('args', nargs='*')
ARGS = PARSER.parse_args()
main(ARGS.env, ARGS.cmd + ARGS.args)
|
Add a scenario to initialize nested dind
|
Add a scenario to initialize nested dind
|
Python
|
apache-2.0
|
jlowdermilk/test-infra,dims/test-infra,michelle192837/test-infra,monopole/test-infra,jlowdermilk/test-infra,monopole/test-infra,kargakis/test-infra,abgworrall/test-infra,michelle192837/test-infra,fejta/test-infra,kargakis/test-infra,ixdy/kubernetes-test-infra,jessfraz/test-infra,BenTheElder/test-infra,michelle192837/test-infra,kubernetes/test-infra,cjwagner/test-infra,shyamjvs/test-infra,shyamjvs/test-infra,brahmaroutu/test-infra,BenTheElder/test-infra,BenTheElder/test-infra,cjwagner/test-infra,cblecker/test-infra,krzyzacy/test-infra,monopole/test-infra,monopole/test-infra,mindprince/test-infra,fejta/test-infra,foxish/test-infra,mindprince/test-infra,fejta/test-infra,mindprince/test-infra,kargakis/test-infra,jlowdermilk/test-infra,michelle192837/test-infra,shyamjvs/test-infra,monopole/test-infra,pwittrock/test-infra,abgworrall/test-infra,ixdy/kubernetes-test-infra,cblecker/test-infra,shyamjvs/test-infra,jessfraz/test-infra,brahmaroutu/test-infra,brahmaroutu/test-infra,fejta/test-infra,foxish/test-infra,cblecker/test-infra,cjwagner/test-infra,brahmaroutu/test-infra,dims/test-infra,pwittrock/test-infra,jlowdermilk/test-infra,cjwagner/test-infra,shyamjvs/test-infra,kargakis/test-infra,abgworrall/test-infra,dims/test-infra,foxish/test-infra,pwittrock/test-infra,dims/test-infra,mindprince/test-infra,dims/test-infra,shyamjvs/test-infra,mindprince/test-infra,kargakis/test-infra,lavalamp/test-infra,krzyzacy/test-infra,cblecker/test-infra,monopole/test-infra,brahmaroutu/test-infra,pwittrock/test-infra,michelle192837/test-infra,krzyzacy/test-infra,lavalamp/test-infra,kubernetes/test-infra,jessfraz/test-infra,kargakis/test-infra,kubernetes/test-infra,lavalamp/test-infra,lavalamp/test-infra,fejta/test-infra,ixdy/kubernetes-test-infra,jlowdermilk/test-infra,ixdy/kubernetes-test-infra,BenTheElder/test-infra,jessfraz/test-infra,abgworrall/test-infra,krzyzacy/test-infra,BenTheElder/test-infra,brahmaroutu/test-infra,michelle192837/test-infra,kubernetes/test-infra,kubernetes/test-infra,foxish/test-infra,lavalamp/test-infra,jlowdermilk/test-infra,cblecker/test-infra,jessfraz/test-infra,kubernetes/test-infra,dims/test-infra,cblecker/test-infra,abgworrall/test-infra,fejta/test-infra,cjwagner/test-infra,foxish/test-infra,jessfraz/test-infra,ixdy/kubernetes-test-infra,lavalamp/test-infra,pwittrock/test-infra,krzyzacy/test-infra,cjwagner/test-infra,BenTheElder/test-infra,krzyzacy/test-infra
|
Add a scenario to initialize nested dind
|
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Prepares for nested docker, and executes a command."""
# TODO(Q-Lee): check the necessity of this once MountPropagation is available in
# prow: https://github.com/kubernetes/kubernetes/pull/59252
import argparse
import os
import subprocess
import sys
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def main(envs, cmd):
"""Make important mounts r-shared, then run script and verify it exits 0."""
check("mount", "--make-rshared", "/lib/modules")
check("mount", "--make-rshared", "/sys")
for env in envs:
key, val = env.split('=', 1)
print >>sys.stderr, '%s=%s' % (key, val)
os.environ[key] = val
if not cmd:
raise ValueError(cmd)
check(*cmd)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--env', default=[], action='append')
PARSER.add_argument('cmd', nargs=1)
PARSER.add_argument('args', nargs='*')
ARGS = PARSER.parse_args()
main(ARGS.env, ARGS.cmd + ARGS.args)
|
<commit_before><commit_msg>Add a scenario to initialize nested dind<commit_after>
|
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Prepares for nested docker, and executes a command."""
# TODO(Q-Lee): check the necessity of this once MountPropagation is available in
# prow: https://github.com/kubernetes/kubernetes/pull/59252
import argparse
import os
import subprocess
import sys
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def main(envs, cmd):
"""Make important mounts r-shared, then run script and verify it exits 0."""
check("mount", "--make-rshared", "/lib/modules")
check("mount", "--make-rshared", "/sys")
for env in envs:
key, val = env.split('=', 1)
print >>sys.stderr, '%s=%s' % (key, val)
os.environ[key] = val
if not cmd:
raise ValueError(cmd)
check(*cmd)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--env', default=[], action='append')
PARSER.add_argument('cmd', nargs=1)
PARSER.add_argument('args', nargs='*')
ARGS = PARSER.parse_args()
main(ARGS.env, ARGS.cmd + ARGS.args)
|
Add a scenario to initialize nested dind
#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Prepares for nested docker, and executes a command."""
# TODO(Q-Lee): check the necessity of this once MountPropagation is available in
# prow: https://github.com/kubernetes/kubernetes/pull/59252
import argparse
import os
import subprocess
import sys
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def main(envs, cmd):
"""Make important mounts r-shared, then run script and verify it exits 0."""
check("mount", "--make-rshared", "/lib/modules")
check("mount", "--make-rshared", "/sys")
for env in envs:
key, val = env.split('=', 1)
print >>sys.stderr, '%s=%s' % (key, val)
os.environ[key] = val
if not cmd:
raise ValueError(cmd)
check(*cmd)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--env', default=[], action='append')
PARSER.add_argument('cmd', nargs=1)
PARSER.add_argument('args', nargs='*')
ARGS = PARSER.parse_args()
main(ARGS.env, ARGS.cmd + ARGS.args)
|
<commit_before><commit_msg>Add a scenario to initialize nested dind<commit_after>#!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Prepares for nested docker, and executes a command."""
# TODO(Q-Lee): check the necessity of this once MountPropagation is available in
# prow: https://github.com/kubernetes/kubernetes/pull/59252
import argparse
import os
import subprocess
import sys
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def main(envs, cmd):
"""Make important mounts r-shared, then run script and verify it exits 0."""
check("mount", "--make-rshared", "/lib/modules")
check("mount", "--make-rshared", "/sys")
for env in envs:
key, val = env.split('=', 1)
print >>sys.stderr, '%s=%s' % (key, val)
os.environ[key] = val
if not cmd:
raise ValueError(cmd)
check(*cmd)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--env', default=[], action='append')
PARSER.add_argument('cmd', nargs=1)
PARSER.add_argument('args', nargs='*')
ARGS = PARSER.parse_args()
main(ARGS.env, ARGS.cmd + ARGS.args)
|
|
8db2f138c83a998d0fa23b1e13f27d0823eba42e
|
utils/convert.py
|
utils/convert.py
|
import json
import argparse
import heapq
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src.model.message import Message
from datetime import datetime
from collections import namedtuple
# For storing messages in an object that the heap can sort
MessageTuple = namedtuple('MessageTuple', 'timestamp tiebreak_value message')
parser = argparse.ArgumentParser(description='FB Message Archive Converter')
parser.add_argument('--in', dest='archivePath', required=True, help="Path to JSON archive")
parser.add_argument('--out', dest='outPath', required=True, help="Path to output file")
args = parser.parse_args()
with open(args.archivePath, 'r') as json_file:
data = json.load(json_file)
heap = []
message_senders = set()
tiebreaker_counter = 0
for message in data['messages']:
message_datetime = datetime.fromtimestamp(int(message['timestamp']))
if 'content' not in message:
# 'content' property contains the message text, other message types (stickers, media etc) use different
# properties which aren't handled here
continue
sender = message['sender_name'].encode('raw_unicode_escape').decode('utf-8')
message_content = message['content'].encode('raw_unicode_escape').decode('utf-8')
new_message = "{date} {time} {sender} {message}\n".format(date=message_datetime.strftime(Message.DATE_FORMAT),
time=message_datetime.strftime(Message.TIME_FORMAT),
sender=sender.replace(' ', ''),
message=message_content.replace('\n', ' '))
heapq.heappush(heap, MessageTuple(timestamp=int(message['timestamp']), tiebreak_value=tiebreaker_counter,
message=new_message))
tiebreaker_counter += 1
sorted_messages = sorted(heap, key=lambda x: x[0])
# The messages were MessageTuples, now pull just the message string out
sorted_messages = [item.message for item in sorted_messages]
with open(args.outPath, 'w', encoding='utf-8') as out_file:
out_file.writelines(sorted_messages)
|
Add message format conversion utility script
|
Add message format conversion utility script
|
Python
|
apache-2.0
|
5agado/conversation-analyzer
|
Add message format conversion utility script
|
import json
import argparse
import heapq
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src.model.message import Message
from datetime import datetime
from collections import namedtuple
# For storing messages in an object that the heap can sort
MessageTuple = namedtuple('MessageTuple', 'timestamp tiebreak_value message')
parser = argparse.ArgumentParser(description='FB Message Archive Converter')
parser.add_argument('--in', dest='archivePath', required=True, help="Path to JSON archive")
parser.add_argument('--out', dest='outPath', required=True, help="Path to output file")
args = parser.parse_args()
with open(args.archivePath, 'r') as json_file:
data = json.load(json_file)
heap = []
message_senders = set()
tiebreaker_counter = 0
for message in data['messages']:
message_datetime = datetime.fromtimestamp(int(message['timestamp']))
if 'content' not in message:
# 'content' property contains the message text, other message types (stickers, media etc) use different
# properties which aren't handled here
continue
sender = message['sender_name'].encode('raw_unicode_escape').decode('utf-8')
message_content = message['content'].encode('raw_unicode_escape').decode('utf-8')
new_message = "{date} {time} {sender} {message}\n".format(date=message_datetime.strftime(Message.DATE_FORMAT),
time=message_datetime.strftime(Message.TIME_FORMAT),
sender=sender.replace(' ', ''),
message=message_content.replace('\n', ' '))
heapq.heappush(heap, MessageTuple(timestamp=int(message['timestamp']), tiebreak_value=tiebreaker_counter,
message=new_message))
tiebreaker_counter += 1
sorted_messages = sorted(heap, key=lambda x: x[0])
# The messages were MessageTuples, now pull just the message string out
sorted_messages = [item.message for item in sorted_messages]
with open(args.outPath, 'w', encoding='utf-8') as out_file:
out_file.writelines(sorted_messages)
|
<commit_before><commit_msg>Add message format conversion utility script<commit_after>
|
import json
import argparse
import heapq
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src.model.message import Message
from datetime import datetime
from collections import namedtuple
# For storing messages in an object that the heap can sort
MessageTuple = namedtuple('MessageTuple', 'timestamp tiebreak_value message')
parser = argparse.ArgumentParser(description='FB Message Archive Converter')
parser.add_argument('--in', dest='archivePath', required=True, help="Path to JSON archive")
parser.add_argument('--out', dest='outPath', required=True, help="Path to output file")
args = parser.parse_args()
with open(args.archivePath, 'r') as json_file:
data = json.load(json_file)
heap = []
message_senders = set()
tiebreaker_counter = 0
for message in data['messages']:
message_datetime = datetime.fromtimestamp(int(message['timestamp']))
if 'content' not in message:
# 'content' property contains the message text, other message types (stickers, media etc) use different
# properties which aren't handled here
continue
sender = message['sender_name'].encode('raw_unicode_escape').decode('utf-8')
message_content = message['content'].encode('raw_unicode_escape').decode('utf-8')
new_message = "{date} {time} {sender} {message}\n".format(date=message_datetime.strftime(Message.DATE_FORMAT),
time=message_datetime.strftime(Message.TIME_FORMAT),
sender=sender.replace(' ', ''),
message=message_content.replace('\n', ' '))
heapq.heappush(heap, MessageTuple(timestamp=int(message['timestamp']), tiebreak_value=tiebreaker_counter,
message=new_message))
tiebreaker_counter += 1
sorted_messages = sorted(heap, key=lambda x: x[0])
# The messages were MessageTuples, now pull just the message string out
sorted_messages = [item.message for item in sorted_messages]
with open(args.outPath, 'w', encoding='utf-8') as out_file:
out_file.writelines(sorted_messages)
|
Add message format conversion utility script
import json
import argparse
import heapq
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src.model.message import Message
from datetime import datetime
from collections import namedtuple
# For storing messages in an object that the heap can sort
MessageTuple = namedtuple('MessageTuple', 'timestamp tiebreak_value message')
parser = argparse.ArgumentParser(description='FB Message Archive Converter')
parser.add_argument('--in', dest='archivePath', required=True, help="Path to JSON archive")
parser.add_argument('--out', dest='outPath', required=True, help="Path to output file")
args = parser.parse_args()
with open(args.archivePath, 'r') as json_file:
data = json.load(json_file)
heap = []
message_senders = set()
tiebreaker_counter = 0
for message in data['messages']:
message_datetime = datetime.fromtimestamp(int(message['timestamp']))
if 'content' not in message:
# 'content' property contains the message text, other message types (stickers, media etc) use different
# properties which aren't handled here
continue
sender = message['sender_name'].encode('raw_unicode_escape').decode('utf-8')
message_content = message['content'].encode('raw_unicode_escape').decode('utf-8')
new_message = "{date} {time} {sender} {message}\n".format(date=message_datetime.strftime(Message.DATE_FORMAT),
time=message_datetime.strftime(Message.TIME_FORMAT),
sender=sender.replace(' ', ''),
message=message_content.replace('\n', ' '))
heapq.heappush(heap, MessageTuple(timestamp=int(message['timestamp']), tiebreak_value=tiebreaker_counter,
message=new_message))
tiebreaker_counter += 1
sorted_messages = sorted(heap, key=lambda x: x[0])
# The messages were MessageTuples, now pull just the message string out
sorted_messages = [item.message for item in sorted_messages]
with open(args.outPath, 'w', encoding='utf-8') as out_file:
out_file.writelines(sorted_messages)
|
<commit_before><commit_msg>Add message format conversion utility script<commit_after>import json
import argparse
import heapq
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from src.model.message import Message
from datetime import datetime
from collections import namedtuple
# For storing messages in an object that the heap can sort
MessageTuple = namedtuple('MessageTuple', 'timestamp tiebreak_value message')
parser = argparse.ArgumentParser(description='FB Message Archive Converter')
parser.add_argument('--in', dest='archivePath', required=True, help="Path to JSON archive")
parser.add_argument('--out', dest='outPath', required=True, help="Path to output file")
args = parser.parse_args()
with open(args.archivePath, 'r') as json_file:
data = json.load(json_file)
heap = []
message_senders = set()
tiebreaker_counter = 0
for message in data['messages']:
message_datetime = datetime.fromtimestamp(int(message['timestamp']))
if 'content' not in message:
# 'content' property contains the message text, other message types (stickers, media etc) use different
# properties which aren't handled here
continue
sender = message['sender_name'].encode('raw_unicode_escape').decode('utf-8')
message_content = message['content'].encode('raw_unicode_escape').decode('utf-8')
new_message = "{date} {time} {sender} {message}\n".format(date=message_datetime.strftime(Message.DATE_FORMAT),
time=message_datetime.strftime(Message.TIME_FORMAT),
sender=sender.replace(' ', ''),
message=message_content.replace('\n', ' '))
heapq.heappush(heap, MessageTuple(timestamp=int(message['timestamp']), tiebreak_value=tiebreaker_counter,
message=new_message))
tiebreaker_counter += 1
sorted_messages = sorted(heap, key=lambda x: x[0])
# The messages were MessageTuples, now pull just the message string out
sorted_messages = [item.message for item in sorted_messages]
with open(args.outPath, 'w', encoding='utf-8') as out_file:
out_file.writelines(sorted_messages)
|
|
7b48044f19a50e4b84810a5e98d93cd537bea5c9
|
ichnaea/gunicorn_config.py
|
ichnaea/gunicorn_config.py
|
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 100k requests to prevent memory leaks
# from effecting us
max_requests = 100000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
|
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 10m requests to prevent memory leaks
# from effecting us, at 1000 req/s this means recycle every 2.8 hours
max_requests = 10000000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
|
Increase gunicorn max_requests to avoid process churn at high request rates.
|
Increase gunicorn max_requests to avoid process churn at high request rates.
|
Python
|
apache-2.0
|
mozilla/ichnaea,therewillbecode/ichnaea,mozilla/ichnaea,mozilla/ichnaea,therewillbecode/ichnaea,therewillbecode/ichnaea,mozilla/ichnaea
|
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 100k requests to prevent memory leaks
# from effecting us
max_requests = 100000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
Increase gunicorn max_requests to avoid process churn at high request rates.
|
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 10m requests to prevent memory leaks
# from effecting us, at 1000 req/s this means recycle every 2.8 hours
max_requests = 10000000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
|
<commit_before># This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 100k requests to prevent memory leaks
# from effecting us
max_requests = 100000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
<commit_msg>Increase gunicorn max_requests to avoid process churn at high request rates.<commit_after>
|
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 10m requests to prevent memory leaks
# from effecting us, at 1000 req/s this means recycle every 2.8 hours
max_requests = 10000000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
|
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 100k requests to prevent memory leaks
# from effecting us
max_requests = 100000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
Increase gunicorn max_requests to avoid process churn at high request rates.
# This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 10m requests to prevent memory leaks
# from effecting us, at 1000 req/s this means recycle every 2.8 hours
max_requests = 10000000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
|
<commit_before># This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 100k requests to prevent memory leaks
# from effecting us
max_requests = 100000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
<commit_msg>Increase gunicorn max_requests to avoid process churn at high request rates.<commit_after># This file contains gunicorn configuration setttings, as described at
# http://docs.gunicorn.org/en/latest/settings.html
# The file is loaded via the -c ichnaea.gunicorn_config command line option
# Use our own Gevent worker
worker_class = "ichnaea.gunicorn_worker.LocationGeventWorker"
# Maximum number of simultaneous greenlets,
# limited by number of DB and Redis connections
worker_connections = 50
# Set timeout to the same value as the default one from Amazon ELB (60 secs).
timeout = 60
# Disable keep-alive
keepalive = 0
# Recycle worker processes after 10m requests to prevent memory leaks
# from effecting us, at 1000 req/s this means recycle every 2.8 hours
max_requests = 10000000
# Log errors to stderr
errorlog = "-"
# Avoid too much output on the console
loglevel = "warning"
def post_worker_init(worker):
from random import randint
# Use 10% jitter, to prevent all workers from restarting at once,
# as they get an almost equal number of requests
jitter = randint(0, max_requests // 10)
worker.max_requests += jitter
# Actually initialize the application
worker.wsgi(None, None)
|
cdd238e7fefbe4dc57db89f8362fa7dce7094f92
|
tests/test_time_utils.py
|
tests/test_time_utils.py
|
from datetime import datetime
import unittest
from pypercube import time_utils
class TestMetric(unittest.TestCase):
def setUp(self):
self.now = datetime(2012, 7, 6, 20, 33, 16, 573225)
def test_yesterday(self):
self.assertEqual(time_utils.yesterday(self.now),
datetime(2012, 7, 5, 20, 33, 16, 573225))
def test_last_week(self):
self.assertEqual(time_utils.last_week(self.now),
datetime(2012, 6, 29, 20, 33, 16, 573225))
def test_start_of_month(self):
self.assertEqual(time_utils.start_of_month(self.now),
datetime(2012, 7, 1))
def test_floor(self):
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_10_SEC),
datetime(2012, 7, 6, 20, 33, 10))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_MIN),
datetime(2012, 7, 6, 20, 33))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_5_MIN),
datetime(2012, 7, 6, 20, 30))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_HOUR),
datetime(2012, 7, 6, 20))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_DAY),
datetime(2012, 7, 6))
self.assertRaisesRegexp(ValueError, "is not a valid resolution",
time_utils.floor, self.now, 12345)
|
Add unit tests for time_utils.
|
Add unit tests for time_utils.
|
Python
|
bsd-3-clause
|
sbuss/pypercube
|
Add unit tests for time_utils.
|
from datetime import datetime
import unittest
from pypercube import time_utils
class TestMetric(unittest.TestCase):
def setUp(self):
self.now = datetime(2012, 7, 6, 20, 33, 16, 573225)
def test_yesterday(self):
self.assertEqual(time_utils.yesterday(self.now),
datetime(2012, 7, 5, 20, 33, 16, 573225))
def test_last_week(self):
self.assertEqual(time_utils.last_week(self.now),
datetime(2012, 6, 29, 20, 33, 16, 573225))
def test_start_of_month(self):
self.assertEqual(time_utils.start_of_month(self.now),
datetime(2012, 7, 1))
def test_floor(self):
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_10_SEC),
datetime(2012, 7, 6, 20, 33, 10))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_MIN),
datetime(2012, 7, 6, 20, 33))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_5_MIN),
datetime(2012, 7, 6, 20, 30))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_HOUR),
datetime(2012, 7, 6, 20))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_DAY),
datetime(2012, 7, 6))
self.assertRaisesRegexp(ValueError, "is not a valid resolution",
time_utils.floor, self.now, 12345)
|
<commit_before><commit_msg>Add unit tests for time_utils.<commit_after>
|
from datetime import datetime
import unittest
from pypercube import time_utils
class TestMetric(unittest.TestCase):
def setUp(self):
self.now = datetime(2012, 7, 6, 20, 33, 16, 573225)
def test_yesterday(self):
self.assertEqual(time_utils.yesterday(self.now),
datetime(2012, 7, 5, 20, 33, 16, 573225))
def test_last_week(self):
self.assertEqual(time_utils.last_week(self.now),
datetime(2012, 6, 29, 20, 33, 16, 573225))
def test_start_of_month(self):
self.assertEqual(time_utils.start_of_month(self.now),
datetime(2012, 7, 1))
def test_floor(self):
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_10_SEC),
datetime(2012, 7, 6, 20, 33, 10))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_MIN),
datetime(2012, 7, 6, 20, 33))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_5_MIN),
datetime(2012, 7, 6, 20, 30))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_HOUR),
datetime(2012, 7, 6, 20))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_DAY),
datetime(2012, 7, 6))
self.assertRaisesRegexp(ValueError, "is not a valid resolution",
time_utils.floor, self.now, 12345)
|
Add unit tests for time_utils.
from datetime import datetime
import unittest
from pypercube import time_utils
class TestMetric(unittest.TestCase):
def setUp(self):
self.now = datetime(2012, 7, 6, 20, 33, 16, 573225)
def test_yesterday(self):
self.assertEqual(time_utils.yesterday(self.now),
datetime(2012, 7, 5, 20, 33, 16, 573225))
def test_last_week(self):
self.assertEqual(time_utils.last_week(self.now),
datetime(2012, 6, 29, 20, 33, 16, 573225))
def test_start_of_month(self):
self.assertEqual(time_utils.start_of_month(self.now),
datetime(2012, 7, 1))
def test_floor(self):
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_10_SEC),
datetime(2012, 7, 6, 20, 33, 10))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_MIN),
datetime(2012, 7, 6, 20, 33))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_5_MIN),
datetime(2012, 7, 6, 20, 30))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_HOUR),
datetime(2012, 7, 6, 20))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_DAY),
datetime(2012, 7, 6))
self.assertRaisesRegexp(ValueError, "is not a valid resolution",
time_utils.floor, self.now, 12345)
|
<commit_before><commit_msg>Add unit tests for time_utils.<commit_after>from datetime import datetime
import unittest
from pypercube import time_utils
class TestMetric(unittest.TestCase):
def setUp(self):
self.now = datetime(2012, 7, 6, 20, 33, 16, 573225)
def test_yesterday(self):
self.assertEqual(time_utils.yesterday(self.now),
datetime(2012, 7, 5, 20, 33, 16, 573225))
def test_last_week(self):
self.assertEqual(time_utils.last_week(self.now),
datetime(2012, 6, 29, 20, 33, 16, 573225))
def test_start_of_month(self):
self.assertEqual(time_utils.start_of_month(self.now),
datetime(2012, 7, 1))
def test_floor(self):
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_10_SEC),
datetime(2012, 7, 6, 20, 33, 10))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_MIN),
datetime(2012, 7, 6, 20, 33))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_5_MIN),
datetime(2012, 7, 6, 20, 30))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_HOUR),
datetime(2012, 7, 6, 20))
self.assertEqual(time_utils.floor(self.now, time_utils.STEP_1_DAY),
datetime(2012, 7, 6))
self.assertRaisesRegexp(ValueError, "is not a valid resolution",
time_utils.floor, self.now, 12345)
|
|
e577d53840137d7849b68420553d16a942280ea0
|
cax/clear.py
|
cax/clear.py
|
f
import config
def copy(f1, f2,
server,
username):
util.log_to_file('ssh.log')
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(server,
username=username)
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(ssh.get_transport())
scp.put(f1, f2,
recursive=True)
scp.close()
def upload():
# Grab the Run DB so we can query it
collection = config.mongo_collection()
# For each TPC run, check if should be uploaded
for doc in collection.find({'detector' : 'tpc'}):
here = None
copies = []
# Iterate over data locations to know status
for datum in doc['data']:
# Is host known?
if 'host' not in datum:
continue
if datum['status'] != 'transferred':
continue
# If the location refers to here
if datum['host'] == config.get_hostname():
# Was data transferred here?
if datum['status'] == 'transferred':
# If so, store info on it.
here = datum
else:
copies.append(datum)
print(here, copies)
if __name__ == "__main__":
clear()
|
Clear DAQ if enough copies
|
Clear DAQ if enough copies
|
Python
|
isc
|
XENON1T/cax,XENON1T/cax
|
Clear DAQ if enough copies
|
f
import config
def copy(f1, f2,
server,
username):
util.log_to_file('ssh.log')
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(server,
username=username)
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(ssh.get_transport())
scp.put(f1, f2,
recursive=True)
scp.close()
def upload():
# Grab the Run DB so we can query it
collection = config.mongo_collection()
# For each TPC run, check if should be uploaded
for doc in collection.find({'detector' : 'tpc'}):
here = None
copies = []
# Iterate over data locations to know status
for datum in doc['data']:
# Is host known?
if 'host' not in datum:
continue
if datum['status'] != 'transferred':
continue
# If the location refers to here
if datum['host'] == config.get_hostname():
# Was data transferred here?
if datum['status'] == 'transferred':
# If so, store info on it.
here = datum
else:
copies.append(datum)
print(here, copies)
if __name__ == "__main__":
clear()
|
<commit_before><commit_msg>Clear DAQ if enough copies<commit_after>
|
f
import config
def copy(f1, f2,
server,
username):
util.log_to_file('ssh.log')
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(server,
username=username)
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(ssh.get_transport())
scp.put(f1, f2,
recursive=True)
scp.close()
def upload():
# Grab the Run DB so we can query it
collection = config.mongo_collection()
# For each TPC run, check if should be uploaded
for doc in collection.find({'detector' : 'tpc'}):
here = None
copies = []
# Iterate over data locations to know status
for datum in doc['data']:
# Is host known?
if 'host' not in datum:
continue
if datum['status'] != 'transferred':
continue
# If the location refers to here
if datum['host'] == config.get_hostname():
# Was data transferred here?
if datum['status'] == 'transferred':
# If so, store info on it.
here = datum
else:
copies.append(datum)
print(here, copies)
if __name__ == "__main__":
clear()
|
Clear DAQ if enough copies
f
import config
def copy(f1, f2,
server,
username):
util.log_to_file('ssh.log')
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(server,
username=username)
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(ssh.get_transport())
scp.put(f1, f2,
recursive=True)
scp.close()
def upload():
# Grab the Run DB so we can query it
collection = config.mongo_collection()
# For each TPC run, check if should be uploaded
for doc in collection.find({'detector' : 'tpc'}):
here = None
copies = []
# Iterate over data locations to know status
for datum in doc['data']:
# Is host known?
if 'host' not in datum:
continue
if datum['status'] != 'transferred':
continue
# If the location refers to here
if datum['host'] == config.get_hostname():
# Was data transferred here?
if datum['status'] == 'transferred':
# If so, store info on it.
here = datum
else:
copies.append(datum)
print(here, copies)
if __name__ == "__main__":
clear()
|
<commit_before><commit_msg>Clear DAQ if enough copies<commit_after>f
import config
def copy(f1, f2,
server,
username):
util.log_to_file('ssh.log')
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.connect(server,
username=username)
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(ssh.get_transport())
scp.put(f1, f2,
recursive=True)
scp.close()
def upload():
# Grab the Run DB so we can query it
collection = config.mongo_collection()
# For each TPC run, check if should be uploaded
for doc in collection.find({'detector' : 'tpc'}):
here = None
copies = []
# Iterate over data locations to know status
for datum in doc['data']:
# Is host known?
if 'host' not in datum:
continue
if datum['status'] != 'transferred':
continue
# If the location refers to here
if datum['host'] == config.get_hostname():
# Was data transferred here?
if datum['status'] == 'transferred':
# If so, store info on it.
here = datum
else:
copies.append(datum)
print(here, copies)
if __name__ == "__main__":
clear()
|
|
616af50809bbc1eb64175a233f08c06426e8b636
|
Designs/shopping_list.py
|
Designs/shopping_list.py
|
from person import Person
class ShoppingList:
def create(self,list_name,description):
with open('shopping_list.txt','a') as sl:
sl.write(list_name+' ' +' '+description)
sl.write('\n')
def show(self):
slist=[]
with open('shopping_list.txt','r') as sl:
#just return a list
for records in sl:
slist.append(records)
return slist
|
Add shopping list class to
|
[Chore] Add shopping list class to
|
Python
|
mit
|
AndersonMasese/Myshop,AndersonMasese/Myshop,AndersonMasese/Myshop
|
[Chore] Add shopping list class to
|
from person import Person
class ShoppingList:
def create(self,list_name,description):
with open('shopping_list.txt','a') as sl:
sl.write(list_name+' ' +' '+description)
sl.write('\n')
def show(self):
slist=[]
with open('shopping_list.txt','r') as sl:
#just return a list
for records in sl:
slist.append(records)
return slist
|
<commit_before><commit_msg>[Chore] Add shopping list class to<commit_after>
|
from person import Person
class ShoppingList:
def create(self,list_name,description):
with open('shopping_list.txt','a') as sl:
sl.write(list_name+' ' +' '+description)
sl.write('\n')
def show(self):
slist=[]
with open('shopping_list.txt','r') as sl:
#just return a list
for records in sl:
slist.append(records)
return slist
|
[Chore] Add shopping list class to
from person import Person
class ShoppingList:
def create(self,list_name,description):
with open('shopping_list.txt','a') as sl:
sl.write(list_name+' ' +' '+description)
sl.write('\n')
def show(self):
slist=[]
with open('shopping_list.txt','r') as sl:
#just return a list
for records in sl:
slist.append(records)
return slist
|
<commit_before><commit_msg>[Chore] Add shopping list class to<commit_after>from person import Person
class ShoppingList:
def create(self,list_name,description):
with open('shopping_list.txt','a') as sl:
sl.write(list_name+' ' +' '+description)
sl.write('\n')
def show(self):
slist=[]
with open('shopping_list.txt','r') as sl:
#just return a list
for records in sl:
slist.append(records)
return slist
|
|
5b8ca7bf4a3383eaddc148caffcf89e73efc743d
|
tests/test_converters.py
|
tests/test_converters.py
|
import pytest
from netsgiro.converters import value_or_none, truthy_or_none, to_bool
values = [
(int, None, None, None),
(int, 1, 1, 1),
(int, 1.5, 1, 1),
(float, 1.5, 1.5, 1.5),
(bool, 1.5, True, True),
(bool, 0, False, None), # different
]
@pytest.mark.parametrize('c, v, v1, v2', values)
def test_value_or_none(c, v, v1, v2):
"""
Test the value or none and truthy or none converters.
They're almost identical.
"""
assert value_or_none(c)(v) == v1
assert truthy_or_none(c)(v) == v2
def test_to_bool():
assert to_bool(True) is True
assert to_bool(False) is False
assert to_bool('J') is True
assert to_bool('N') is False
for v in [None, 'S', '', [], {}]:
with pytest.raises(ValueError, match="Expected 'J' or 'N', got "):
to_bool(v)
|
Add tests for untested converters
|
tests: Add tests for untested converters
|
Python
|
apache-2.0
|
otovo/python-netsgiro
|
tests: Add tests for untested converters
|
import pytest
from netsgiro.converters import value_or_none, truthy_or_none, to_bool
values = [
(int, None, None, None),
(int, 1, 1, 1),
(int, 1.5, 1, 1),
(float, 1.5, 1.5, 1.5),
(bool, 1.5, True, True),
(bool, 0, False, None), # different
]
@pytest.mark.parametrize('c, v, v1, v2', values)
def test_value_or_none(c, v, v1, v2):
"""
Test the value or none and truthy or none converters.
They're almost identical.
"""
assert value_or_none(c)(v) == v1
assert truthy_or_none(c)(v) == v2
def test_to_bool():
assert to_bool(True) is True
assert to_bool(False) is False
assert to_bool('J') is True
assert to_bool('N') is False
for v in [None, 'S', '', [], {}]:
with pytest.raises(ValueError, match="Expected 'J' or 'N', got "):
to_bool(v)
|
<commit_before><commit_msg>tests: Add tests for untested converters<commit_after>
|
import pytest
from netsgiro.converters import value_or_none, truthy_or_none, to_bool
values = [
(int, None, None, None),
(int, 1, 1, 1),
(int, 1.5, 1, 1),
(float, 1.5, 1.5, 1.5),
(bool, 1.5, True, True),
(bool, 0, False, None), # different
]
@pytest.mark.parametrize('c, v, v1, v2', values)
def test_value_or_none(c, v, v1, v2):
"""
Test the value or none and truthy or none converters.
They're almost identical.
"""
assert value_or_none(c)(v) == v1
assert truthy_or_none(c)(v) == v2
def test_to_bool():
assert to_bool(True) is True
assert to_bool(False) is False
assert to_bool('J') is True
assert to_bool('N') is False
for v in [None, 'S', '', [], {}]:
with pytest.raises(ValueError, match="Expected 'J' or 'N', got "):
to_bool(v)
|
tests: Add tests for untested convertersimport pytest
from netsgiro.converters import value_or_none, truthy_or_none, to_bool
values = [
(int, None, None, None),
(int, 1, 1, 1),
(int, 1.5, 1, 1),
(float, 1.5, 1.5, 1.5),
(bool, 1.5, True, True),
(bool, 0, False, None), # different
]
@pytest.mark.parametrize('c, v, v1, v2', values)
def test_value_or_none(c, v, v1, v2):
"""
Test the value or none and truthy or none converters.
They're almost identical.
"""
assert value_or_none(c)(v) == v1
assert truthy_or_none(c)(v) == v2
def test_to_bool():
assert to_bool(True) is True
assert to_bool(False) is False
assert to_bool('J') is True
assert to_bool('N') is False
for v in [None, 'S', '', [], {}]:
with pytest.raises(ValueError, match="Expected 'J' or 'N', got "):
to_bool(v)
|
<commit_before><commit_msg>tests: Add tests for untested converters<commit_after>import pytest
from netsgiro.converters import value_or_none, truthy_or_none, to_bool
values = [
(int, None, None, None),
(int, 1, 1, 1),
(int, 1.5, 1, 1),
(float, 1.5, 1.5, 1.5),
(bool, 1.5, True, True),
(bool, 0, False, None), # different
]
@pytest.mark.parametrize('c, v, v1, v2', values)
def test_value_or_none(c, v, v1, v2):
"""
Test the value or none and truthy or none converters.
They're almost identical.
"""
assert value_or_none(c)(v) == v1
assert truthy_or_none(c)(v) == v2
def test_to_bool():
assert to_bool(True) is True
assert to_bool(False) is False
assert to_bool('J') is True
assert to_bool('N') is False
for v in [None, 'S', '', [], {}]:
with pytest.raises(ValueError, match="Expected 'J' or 'N', got "):
to_bool(v)
|
|
ab23323eb253b732c8f3ed08420be754790d2e76
|
utils/glediator_patch.py
|
utils/glediator_patch.py
|
import sys
if (len(sys.argv) <= 3):
print "Usage: %s <IP> <width> <height>" % (sys.argv[0])
sys.exit()
ip = sys.argv[1].split(".")
if (len(ip) < 4):
print "IP should be of the form: xxx.xxx.xxx.xxx"
sys.exit()
width = int(sys.argv[2])
height = int(sys.argv[3])
pixels = width * height
bytes = pixels * 3
max_bytes_per_universe = 512
pixels_per_universe=max_bytes_per_universe / 3
universes = (pixels + pixels_per_universe - 1) / pixels_per_universe
print "Patch_Matrix_Size_Y=%d" % height
print "Patch_Matrix_Size_X=%d" % width
print "Patch_Num_Unis=%d" % universes
for universeId in range(0,universes):
print "Patch_Uni_ID_%d_IP1=%s" % (universeId, ip[0])
print "Patch_Uni_ID_%d_IP2=%s" % (universeId, ip[1])
print "Patch_Uni_ID_%d_IP3=%s" % (universeId, ip[2])
print "Patch_Uni_ID_%d_IP4=%s" % (universeId, ip[3])
print "Patch_Uni_ID_%d_Net_Nr=%d" % (universeId, universeId >> 8)
print "Patch_Uni_ID_%d_Sub_Net_Nr=%d" % (universeId, (universeId >> 4) & 0xF)
print "Patch_Uni_ID_%d_Uni_Nr=%d" % (universeId, universeId & 0xF)
print "Patch_Uni_ID_%d_Num_Ch=%d" % (universeId, min(pixels_per_universe, pixels - (pixels_per_universe * universeId)) * 3 )
for x in range(0, width):
for y in range(0, height):
offset = x + (y * width)
universeId = offset / pixels_per_universe
universe_offset = offset % pixels_per_universe
print "Patch_Pixel_X_%d_Y_%d_Uni_ID=%d" % (x,y,universeId)
print "Patch_Pixel_X_%d_Y_%d_Ch_R=%d" % (x,y,(universe_offset * 3)+0)
print "Patch_Pixel_X_%d_Y_%d_Ch_G=%d" % (x,y,(universe_offset * 3)+2)
print "Patch_Pixel_X_%d_Y_%d_Ch_B=%d" % (x,y,(universe_offset * 3)+1)
|
Add patch-file generator for GLEDiator
|
Add patch-file generator for GLEDiator
|
Python
|
mit
|
Frans-Willem/LEDMatrixHUB75,Frans-Willem/LEDMatrixHUB75,Frans-Willem/LEDMatrixHUB75,Frans-Willem/LEDMatrixHUB75
|
Add patch-file generator for GLEDiator
|
import sys
if (len(sys.argv) <= 3):
print "Usage: %s <IP> <width> <height>" % (sys.argv[0])
sys.exit()
ip = sys.argv[1].split(".")
if (len(ip) < 4):
print "IP should be of the form: xxx.xxx.xxx.xxx"
sys.exit()
width = int(sys.argv[2])
height = int(sys.argv[3])
pixels = width * height
bytes = pixels * 3
max_bytes_per_universe = 512
pixels_per_universe=max_bytes_per_universe / 3
universes = (pixels + pixels_per_universe - 1) / pixels_per_universe
print "Patch_Matrix_Size_Y=%d" % height
print "Patch_Matrix_Size_X=%d" % width
print "Patch_Num_Unis=%d" % universes
for universeId in range(0,universes):
print "Patch_Uni_ID_%d_IP1=%s" % (universeId, ip[0])
print "Patch_Uni_ID_%d_IP2=%s" % (universeId, ip[1])
print "Patch_Uni_ID_%d_IP3=%s" % (universeId, ip[2])
print "Patch_Uni_ID_%d_IP4=%s" % (universeId, ip[3])
print "Patch_Uni_ID_%d_Net_Nr=%d" % (universeId, universeId >> 8)
print "Patch_Uni_ID_%d_Sub_Net_Nr=%d" % (universeId, (universeId >> 4) & 0xF)
print "Patch_Uni_ID_%d_Uni_Nr=%d" % (universeId, universeId & 0xF)
print "Patch_Uni_ID_%d_Num_Ch=%d" % (universeId, min(pixels_per_universe, pixels - (pixels_per_universe * universeId)) * 3 )
for x in range(0, width):
for y in range(0, height):
offset = x + (y * width)
universeId = offset / pixels_per_universe
universe_offset = offset % pixels_per_universe
print "Patch_Pixel_X_%d_Y_%d_Uni_ID=%d" % (x,y,universeId)
print "Patch_Pixel_X_%d_Y_%d_Ch_R=%d" % (x,y,(universe_offset * 3)+0)
print "Patch_Pixel_X_%d_Y_%d_Ch_G=%d" % (x,y,(universe_offset * 3)+2)
print "Patch_Pixel_X_%d_Y_%d_Ch_B=%d" % (x,y,(universe_offset * 3)+1)
|
<commit_before><commit_msg>Add patch-file generator for GLEDiator<commit_after>
|
import sys
if (len(sys.argv) <= 3):
print "Usage: %s <IP> <width> <height>" % (sys.argv[0])
sys.exit()
ip = sys.argv[1].split(".")
if (len(ip) < 4):
print "IP should be of the form: xxx.xxx.xxx.xxx"
sys.exit()
width = int(sys.argv[2])
height = int(sys.argv[3])
pixels = width * height
bytes = pixels * 3
max_bytes_per_universe = 512
pixels_per_universe=max_bytes_per_universe / 3
universes = (pixels + pixels_per_universe - 1) / pixels_per_universe
print "Patch_Matrix_Size_Y=%d" % height
print "Patch_Matrix_Size_X=%d" % width
print "Patch_Num_Unis=%d" % universes
for universeId in range(0,universes):
print "Patch_Uni_ID_%d_IP1=%s" % (universeId, ip[0])
print "Patch_Uni_ID_%d_IP2=%s" % (universeId, ip[1])
print "Patch_Uni_ID_%d_IP3=%s" % (universeId, ip[2])
print "Patch_Uni_ID_%d_IP4=%s" % (universeId, ip[3])
print "Patch_Uni_ID_%d_Net_Nr=%d" % (universeId, universeId >> 8)
print "Patch_Uni_ID_%d_Sub_Net_Nr=%d" % (universeId, (universeId >> 4) & 0xF)
print "Patch_Uni_ID_%d_Uni_Nr=%d" % (universeId, universeId & 0xF)
print "Patch_Uni_ID_%d_Num_Ch=%d" % (universeId, min(pixels_per_universe, pixels - (pixels_per_universe * universeId)) * 3 )
for x in range(0, width):
for y in range(0, height):
offset = x + (y * width)
universeId = offset / pixels_per_universe
universe_offset = offset % pixels_per_universe
print "Patch_Pixel_X_%d_Y_%d_Uni_ID=%d" % (x,y,universeId)
print "Patch_Pixel_X_%d_Y_%d_Ch_R=%d" % (x,y,(universe_offset * 3)+0)
print "Patch_Pixel_X_%d_Y_%d_Ch_G=%d" % (x,y,(universe_offset * 3)+2)
print "Patch_Pixel_X_%d_Y_%d_Ch_B=%d" % (x,y,(universe_offset * 3)+1)
|
Add patch-file generator for GLEDiatorimport sys
if (len(sys.argv) <= 3):
print "Usage: %s <IP> <width> <height>" % (sys.argv[0])
sys.exit()
ip = sys.argv[1].split(".")
if (len(ip) < 4):
print "IP should be of the form: xxx.xxx.xxx.xxx"
sys.exit()
width = int(sys.argv[2])
height = int(sys.argv[3])
pixels = width * height
bytes = pixels * 3
max_bytes_per_universe = 512
pixels_per_universe=max_bytes_per_universe / 3
universes = (pixels + pixels_per_universe - 1) / pixels_per_universe
print "Patch_Matrix_Size_Y=%d" % height
print "Patch_Matrix_Size_X=%d" % width
print "Patch_Num_Unis=%d" % universes
for universeId in range(0,universes):
print "Patch_Uni_ID_%d_IP1=%s" % (universeId, ip[0])
print "Patch_Uni_ID_%d_IP2=%s" % (universeId, ip[1])
print "Patch_Uni_ID_%d_IP3=%s" % (universeId, ip[2])
print "Patch_Uni_ID_%d_IP4=%s" % (universeId, ip[3])
print "Patch_Uni_ID_%d_Net_Nr=%d" % (universeId, universeId >> 8)
print "Patch_Uni_ID_%d_Sub_Net_Nr=%d" % (universeId, (universeId >> 4) & 0xF)
print "Patch_Uni_ID_%d_Uni_Nr=%d" % (universeId, universeId & 0xF)
print "Patch_Uni_ID_%d_Num_Ch=%d" % (universeId, min(pixels_per_universe, pixels - (pixels_per_universe * universeId)) * 3 )
for x in range(0, width):
for y in range(0, height):
offset = x + (y * width)
universeId = offset / pixels_per_universe
universe_offset = offset % pixels_per_universe
print "Patch_Pixel_X_%d_Y_%d_Uni_ID=%d" % (x,y,universeId)
print "Patch_Pixel_X_%d_Y_%d_Ch_R=%d" % (x,y,(universe_offset * 3)+0)
print "Patch_Pixel_X_%d_Y_%d_Ch_G=%d" % (x,y,(universe_offset * 3)+2)
print "Patch_Pixel_X_%d_Y_%d_Ch_B=%d" % (x,y,(universe_offset * 3)+1)
|
<commit_before><commit_msg>Add patch-file generator for GLEDiator<commit_after>import sys
if (len(sys.argv) <= 3):
print "Usage: %s <IP> <width> <height>" % (sys.argv[0])
sys.exit()
ip = sys.argv[1].split(".")
if (len(ip) < 4):
print "IP should be of the form: xxx.xxx.xxx.xxx"
sys.exit()
width = int(sys.argv[2])
height = int(sys.argv[3])
pixels = width * height
bytes = pixels * 3
max_bytes_per_universe = 512
pixels_per_universe=max_bytes_per_universe / 3
universes = (pixels + pixels_per_universe - 1) / pixels_per_universe
print "Patch_Matrix_Size_Y=%d" % height
print "Patch_Matrix_Size_X=%d" % width
print "Patch_Num_Unis=%d" % universes
for universeId in range(0,universes):
print "Patch_Uni_ID_%d_IP1=%s" % (universeId, ip[0])
print "Patch_Uni_ID_%d_IP2=%s" % (universeId, ip[1])
print "Patch_Uni_ID_%d_IP3=%s" % (universeId, ip[2])
print "Patch_Uni_ID_%d_IP4=%s" % (universeId, ip[3])
print "Patch_Uni_ID_%d_Net_Nr=%d" % (universeId, universeId >> 8)
print "Patch_Uni_ID_%d_Sub_Net_Nr=%d" % (universeId, (universeId >> 4) & 0xF)
print "Patch_Uni_ID_%d_Uni_Nr=%d" % (universeId, universeId & 0xF)
print "Patch_Uni_ID_%d_Num_Ch=%d" % (universeId, min(pixels_per_universe, pixels - (pixels_per_universe * universeId)) * 3 )
for x in range(0, width):
for y in range(0, height):
offset = x + (y * width)
universeId = offset / pixels_per_universe
universe_offset = offset % pixels_per_universe
print "Patch_Pixel_X_%d_Y_%d_Uni_ID=%d" % (x,y,universeId)
print "Patch_Pixel_X_%d_Y_%d_Ch_R=%d" % (x,y,(universe_offset * 3)+0)
print "Patch_Pixel_X_%d_Y_%d_Ch_G=%d" % (x,y,(universe_offset * 3)+2)
print "Patch_Pixel_X_%d_Y_%d_Ch_B=%d" % (x,y,(universe_offset * 3)+1)
|
|
08f7cb8832ea361888a915c3e9532d8ff2cbd718
|
usr/examples/lbp_desc.py
|
usr/examples/lbp_desc.py
|
import sensor, time, image
sensor.reset()
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
img = sensor.snapshot()
img.draw_string(0, 0, "Please wait...")
d0 = None
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
if objects:
face = objects[0]
d1 = img.find_lbp(face)
if (d0 == None):
d0 = d1
else:
dist = image.match_descriptor(image.LBP, d0, d1)
img.draw_string(0, 10, "Match %d%%"%(dist))
img.draw_rectangle(face)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
|
Add LBP descriptor example script
|
Add LBP descriptor example script
|
Python
|
mit
|
iabdalkader/openmv,kwagyeman/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv
|
Add LBP descriptor example script
|
import sensor, time, image
sensor.reset()
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
img = sensor.snapshot()
img.draw_string(0, 0, "Please wait...")
d0 = None
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
if objects:
face = objects[0]
d1 = img.find_lbp(face)
if (d0 == None):
d0 = d1
else:
dist = image.match_descriptor(image.LBP, d0, d1)
img.draw_string(0, 10, "Match %d%%"%(dist))
img.draw_rectangle(face)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
|
<commit_before><commit_msg>Add LBP descriptor example script<commit_after>
|
import sensor, time, image
sensor.reset()
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
img = sensor.snapshot()
img.draw_string(0, 0, "Please wait...")
d0 = None
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
if objects:
face = objects[0]
d1 = img.find_lbp(face)
if (d0 == None):
d0 = d1
else:
dist = image.match_descriptor(image.LBP, d0, d1)
img.draw_string(0, 10, "Match %d%%"%(dist))
img.draw_rectangle(face)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
|
Add LBP descriptor example scriptimport sensor, time, image
sensor.reset()
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
img = sensor.snapshot()
img.draw_string(0, 0, "Please wait...")
d0 = None
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
if objects:
face = objects[0]
d1 = img.find_lbp(face)
if (d0 == None):
d0 = d1
else:
dist = image.match_descriptor(image.LBP, d0, d1)
img.draw_string(0, 10, "Match %d%%"%(dist))
img.draw_rectangle(face)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
|
<commit_before><commit_msg>Add LBP descriptor example script<commit_after>import sensor, time, image
sensor.reset()
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.HQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
# Load Haar Cascade
# By default this will use all stages, lower stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# Skip a few frames to allow the sensor to settle down
# Note: This takes more time when executed from the IDE.
for i in range(0, 30):
img = sensor.snapshot()
img.draw_string(0, 0, "Please wait...")
d0 = None
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
if objects:
face = objects[0]
d1 = img.find_lbp(face)
if (d0 == None):
d0 = d1
else:
dist = image.match_descriptor(image.LBP, d0, d1)
img.draw_string(0, 10, "Match %d%%"%(dist))
img.draw_rectangle(face)
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
|
|
75d58ad792380c182a6842c6695f537268932113
|
scripts/supersegger_trials.py
|
scripts/supersegger_trials.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
supersegger_trials
^^^^^^^^^^^^^^^^^^
Example to test/use supersegger files
"""
from tunacell import Experiment, Parser, Observable
from tunacell.plotting.samples import SamplePlot
def iterate(exp):
count = 0
for cell in exp.iter_cells():
count += 1
print('{} cells'.format(count))
print('last: {}'.format(cell))
return cell
def plot_samples(exp):
print(isinstance(exp, Experiment))
parser = Parser(exp)
parser.add_sample(1) # random sample
colony = parser.get_colony(0)
print(colony)
obs = Observable(name='length', raw='Long axis (L)')
colony.decompose(seed=357) # so that decomposition is always identical
myplot = SamplePlot([colony, ], parser=parser)
myplot.make_plot(obs)
myplot.save(user_bname='sample', add_obs=False, extension='.png')
if __name__ == '__main__':
location = '~/Boulot/Temp/seggerdir' # this is a supersegger root folder
exp = Experiment(path=location, filetype='supersegger')
cell = iterate(exp)
#
# exp._count_items()
plot_samples(exp)
|
Add example script to use supersegger io module
|
Add example script to use supersegger io module
|
Python
|
mit
|
LeBarbouze/tunacell
|
Add example script to use supersegger io module
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
supersegger_trials
^^^^^^^^^^^^^^^^^^
Example to test/use supersegger files
"""
from tunacell import Experiment, Parser, Observable
from tunacell.plotting.samples import SamplePlot
def iterate(exp):
count = 0
for cell in exp.iter_cells():
count += 1
print('{} cells'.format(count))
print('last: {}'.format(cell))
return cell
def plot_samples(exp):
print(isinstance(exp, Experiment))
parser = Parser(exp)
parser.add_sample(1) # random sample
colony = parser.get_colony(0)
print(colony)
obs = Observable(name='length', raw='Long axis (L)')
colony.decompose(seed=357) # so that decomposition is always identical
myplot = SamplePlot([colony, ], parser=parser)
myplot.make_plot(obs)
myplot.save(user_bname='sample', add_obs=False, extension='.png')
if __name__ == '__main__':
location = '~/Boulot/Temp/seggerdir' # this is a supersegger root folder
exp = Experiment(path=location, filetype='supersegger')
cell = iterate(exp)
#
# exp._count_items()
plot_samples(exp)
|
<commit_before><commit_msg>Add example script to use supersegger io module<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
supersegger_trials
^^^^^^^^^^^^^^^^^^
Example to test/use supersegger files
"""
from tunacell import Experiment, Parser, Observable
from tunacell.plotting.samples import SamplePlot
def iterate(exp):
count = 0
for cell in exp.iter_cells():
count += 1
print('{} cells'.format(count))
print('last: {}'.format(cell))
return cell
def plot_samples(exp):
print(isinstance(exp, Experiment))
parser = Parser(exp)
parser.add_sample(1) # random sample
colony = parser.get_colony(0)
print(colony)
obs = Observable(name='length', raw='Long axis (L)')
colony.decompose(seed=357) # so that decomposition is always identical
myplot = SamplePlot([colony, ], parser=parser)
myplot.make_plot(obs)
myplot.save(user_bname='sample', add_obs=False, extension='.png')
if __name__ == '__main__':
location = '~/Boulot/Temp/seggerdir' # this is a supersegger root folder
exp = Experiment(path=location, filetype='supersegger')
cell = iterate(exp)
#
# exp._count_items()
plot_samples(exp)
|
Add example script to use supersegger io module#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
supersegger_trials
^^^^^^^^^^^^^^^^^^
Example to test/use supersegger files
"""
from tunacell import Experiment, Parser, Observable
from tunacell.plotting.samples import SamplePlot
def iterate(exp):
count = 0
for cell in exp.iter_cells():
count += 1
print('{} cells'.format(count))
print('last: {}'.format(cell))
return cell
def plot_samples(exp):
print(isinstance(exp, Experiment))
parser = Parser(exp)
parser.add_sample(1) # random sample
colony = parser.get_colony(0)
print(colony)
obs = Observable(name='length', raw='Long axis (L)')
colony.decompose(seed=357) # so that decomposition is always identical
myplot = SamplePlot([colony, ], parser=parser)
myplot.make_plot(obs)
myplot.save(user_bname='sample', add_obs=False, extension='.png')
if __name__ == '__main__':
location = '~/Boulot/Temp/seggerdir' # this is a supersegger root folder
exp = Experiment(path=location, filetype='supersegger')
cell = iterate(exp)
#
# exp._count_items()
plot_samples(exp)
|
<commit_before><commit_msg>Add example script to use supersegger io module<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
supersegger_trials
^^^^^^^^^^^^^^^^^^
Example to test/use supersegger files
"""
from tunacell import Experiment, Parser, Observable
from tunacell.plotting.samples import SamplePlot
def iterate(exp):
count = 0
for cell in exp.iter_cells():
count += 1
print('{} cells'.format(count))
print('last: {}'.format(cell))
return cell
def plot_samples(exp):
print(isinstance(exp, Experiment))
parser = Parser(exp)
parser.add_sample(1) # random sample
colony = parser.get_colony(0)
print(colony)
obs = Observable(name='length', raw='Long axis (L)')
colony.decompose(seed=357) # so that decomposition is always identical
myplot = SamplePlot([colony, ], parser=parser)
myplot.make_plot(obs)
myplot.save(user_bname='sample', add_obs=False, extension='.png')
if __name__ == '__main__':
location = '~/Boulot/Temp/seggerdir' # this is a supersegger root folder
exp = Experiment(path=location, filetype='supersegger')
cell = iterate(exp)
#
# exp._count_items()
plot_samples(exp)
|
|
39d0261d8a325be1c6d50d3a47e0563f3dcdfa8e
|
netbox/tenancy/tests/test_filters.py
|
netbox/tenancy/tests/test_filters.py
|
from django.test import TestCase
from tenancy.filters import TenantFilter, TenantGroupFilter
from tenancy.models import Tenant, TenantGroup
class TenantGroupTestCase(TestCase):
queryset = TenantGroup.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
def test_id(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id': [str(id) for id in id_list]}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase):
queryset = Tenant.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_id__in(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id__in': ','.join([str(id) for id in id_list])}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
|
Add tests for tenancy filters
|
Add tests for tenancy filters
|
Python
|
apache-2.0
|
digitalocean/netbox,digitalocean/netbox,digitalocean/netbox,digitalocean/netbox
|
Add tests for tenancy filters
|
from django.test import TestCase
from tenancy.filters import TenantFilter, TenantGroupFilter
from tenancy.models import Tenant, TenantGroup
class TenantGroupTestCase(TestCase):
queryset = TenantGroup.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
def test_id(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id': [str(id) for id in id_list]}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase):
queryset = Tenant.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_id__in(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id__in': ','.join([str(id) for id in id_list])}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
|
<commit_before><commit_msg>Add tests for tenancy filters<commit_after>
|
from django.test import TestCase
from tenancy.filters import TenantFilter, TenantGroupFilter
from tenancy.models import Tenant, TenantGroup
class TenantGroupTestCase(TestCase):
queryset = TenantGroup.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
def test_id(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id': [str(id) for id in id_list]}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase):
queryset = Tenant.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_id__in(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id__in': ','.join([str(id) for id in id_list])}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
|
Add tests for tenancy filtersfrom django.test import TestCase
from tenancy.filters import TenantFilter, TenantGroupFilter
from tenancy.models import Tenant, TenantGroup
class TenantGroupTestCase(TestCase):
queryset = TenantGroup.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
def test_id(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id': [str(id) for id in id_list]}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase):
queryset = Tenant.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_id__in(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id__in': ','.join([str(id) for id in id_list])}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
|
<commit_before><commit_msg>Add tests for tenancy filters<commit_after>from django.test import TestCase
from tenancy.filters import TenantFilter, TenantGroupFilter
from tenancy.models import Tenant, TenantGroup
class TenantGroupTestCase(TestCase):
queryset = TenantGroup.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
def test_id(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id': [str(id) for id in id_list]}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_name(self):
params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-group-1', 'tenant-group-2']}
self.assertEqual(TenantGroupFilter(params, self.queryset).qs.count(), 2)
class TenantTestCase(TestCase):
queryset = Tenant.objects.all()
@classmethod
def setUpTestData(cls):
groups = (
TenantGroup(name='Tenant Group 1', slug='tenant-group-1'),
TenantGroup(name='Tenant Group 2', slug='tenant-group-2'),
TenantGroup(name='Tenant Group 3', slug='tenant-group-3'),
)
TenantGroup.objects.bulk_create(groups)
tenants = (
Tenant(name='Tenant 1', slug='tenant-1', group=groups[0]),
Tenant(name='Tenant 2', slug='tenant-2', group=groups[1]),
Tenant(name='Tenant 3', slug='tenant-3', group=groups[2]),
)
Tenant.objects.bulk_create(tenants)
def test_name(self):
params = {'name': ['Tenant 1', 'Tenant 2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_slug(self):
params = {'slug': ['tenant-1', 'tenant-2']}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_id__in(self):
id_list = self.queryset.values_list('id', flat=True)[:2]
params = {'id__in': ','.join([str(id) for id in id_list])}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
def test_group(self):
group = TenantGroup.objects.all()[:2]
params = {'group_id': [group[0].pk, group[1].pk]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
params = {'group': [group[0].slug, group[1].slug]}
self.assertEqual(TenantFilter(params, self.queryset).qs.count(), 2)
|
|
6288ec2dfff70667c7fcbfa76a25395f2aaed744
|
examples/train_mnist/run_experiments.py
|
examples/train_mnist/run_experiments.py
|
import fire
from datetime import datetime
from itertools import product
from itertools import izip
import os
import subprocess
# Dictionary of lists to list of dictionaries
def dl_to_ld(dicts):
return list(dict(izip(dicts, x)) for x in product(*dicts.itervalues()))
def main(gpu=-1):
##########################################################################
# User settings
##########################################################################
base_cmd = 'python train_mnist.py'
options = {
'gpu': [gpu],
'unit': [500, 1000],
}
base_dir = 'result'
##########################################################################
options_list = dl_to_ld(options)
dt = datetime.now()
date = dt.strftime('%Y_%m_%d_%H_%M')
for i, opts in enumerate(options_list):
cmd = base_cmd
out = os.path.join(base_dir, date, 'iter_{}'.format(i))
cmd += ' --out {} '.format(out)
for key, val in opts.items():
cmd += ' --{} {} '.format(key, val)
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
fire.Fire(main)
|
Make sample python script to run multiple experiments
|
Make sample python script to run multiple experiments
|
Python
|
mit
|
yuyu2172/ilv
|
Make sample python script to run multiple experiments
|
import fire
from datetime import datetime
from itertools import product
from itertools import izip
import os
import subprocess
# Dictionary of lists to list of dictionaries
def dl_to_ld(dicts):
return list(dict(izip(dicts, x)) for x in product(*dicts.itervalues()))
def main(gpu=-1):
##########################################################################
# User settings
##########################################################################
base_cmd = 'python train_mnist.py'
options = {
'gpu': [gpu],
'unit': [500, 1000],
}
base_dir = 'result'
##########################################################################
options_list = dl_to_ld(options)
dt = datetime.now()
date = dt.strftime('%Y_%m_%d_%H_%M')
for i, opts in enumerate(options_list):
cmd = base_cmd
out = os.path.join(base_dir, date, 'iter_{}'.format(i))
cmd += ' --out {} '.format(out)
for key, val in opts.items():
cmd += ' --{} {} '.format(key, val)
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
fire.Fire(main)
|
<commit_before><commit_msg>Make sample python script to run multiple experiments<commit_after>
|
import fire
from datetime import datetime
from itertools import product
from itertools import izip
import os
import subprocess
# Dictionary of lists to list of dictionaries
def dl_to_ld(dicts):
return list(dict(izip(dicts, x)) for x in product(*dicts.itervalues()))
def main(gpu=-1):
##########################################################################
# User settings
##########################################################################
base_cmd = 'python train_mnist.py'
options = {
'gpu': [gpu],
'unit': [500, 1000],
}
base_dir = 'result'
##########################################################################
options_list = dl_to_ld(options)
dt = datetime.now()
date = dt.strftime('%Y_%m_%d_%H_%M')
for i, opts in enumerate(options_list):
cmd = base_cmd
out = os.path.join(base_dir, date, 'iter_{}'.format(i))
cmd += ' --out {} '.format(out)
for key, val in opts.items():
cmd += ' --{} {} '.format(key, val)
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
fire.Fire(main)
|
Make sample python script to run multiple experimentsimport fire
from datetime import datetime
from itertools import product
from itertools import izip
import os
import subprocess
# Dictionary of lists to list of dictionaries
def dl_to_ld(dicts):
return list(dict(izip(dicts, x)) for x in product(*dicts.itervalues()))
def main(gpu=-1):
##########################################################################
# User settings
##########################################################################
base_cmd = 'python train_mnist.py'
options = {
'gpu': [gpu],
'unit': [500, 1000],
}
base_dir = 'result'
##########################################################################
options_list = dl_to_ld(options)
dt = datetime.now()
date = dt.strftime('%Y_%m_%d_%H_%M')
for i, opts in enumerate(options_list):
cmd = base_cmd
out = os.path.join(base_dir, date, 'iter_{}'.format(i))
cmd += ' --out {} '.format(out)
for key, val in opts.items():
cmd += ' --{} {} '.format(key, val)
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
fire.Fire(main)
|
<commit_before><commit_msg>Make sample python script to run multiple experiments<commit_after>import fire
from datetime import datetime
from itertools import product
from itertools import izip
import os
import subprocess
# Dictionary of lists to list of dictionaries
def dl_to_ld(dicts):
return list(dict(izip(dicts, x)) for x in product(*dicts.itervalues()))
def main(gpu=-1):
##########################################################################
# User settings
##########################################################################
base_cmd = 'python train_mnist.py'
options = {
'gpu': [gpu],
'unit': [500, 1000],
}
base_dir = 'result'
##########################################################################
options_list = dl_to_ld(options)
dt = datetime.now()
date = dt.strftime('%Y_%m_%d_%H_%M')
for i, opts in enumerate(options_list):
cmd = base_cmd
out = os.path.join(base_dir, date, 'iter_{}'.format(i))
cmd += ' --out {} '.format(out)
for key, val in opts.items():
cmd += ' --{} {} '.format(key, val)
print(cmd)
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
fire.Fire(main)
|
|
cd3a1b4cb31c30860734df7b16879e5f8ab46214
|
elephantblog/templatetags/elephantblog_tags.py
|
elephantblog/templatetags/elephantblog_tags.py
|
from django import template
from elephantblog.models import Category, Entry
register = template.Library()
@register.assignment_tag
def elephantblog_categories():
return Category.objects.all()
@register.assignment_tag
def elephantblog_archive_months():
return Entry.objects.active().dates('published_on', 'month', 'DESC')
|
Add tags for a blog sidebar
|
Add tags for a blog sidebar
|
Python
|
bsd-3-clause
|
sbaechler/feincms-elephantblog,michaelkuty/feincms-elephantblog,sbaechler/feincms-elephantblog,feincms/feincms-elephantblog,sbaechler/feincms-elephantblog,matthiask/feincms-elephantblog,feincms/feincms-elephantblog,matthiask/feincms-elephantblog,joshuajonah/feincms-elephantblog,matthiask/feincms-elephantblog,joshuajonah/feincms-elephantblog,joshuajonah/feincms-elephantblog,michaelkuty/feincms-elephantblog,michaelkuty/feincms-elephantblog
|
Add tags for a blog sidebar
|
from django import template
from elephantblog.models import Category, Entry
register = template.Library()
@register.assignment_tag
def elephantblog_categories():
return Category.objects.all()
@register.assignment_tag
def elephantblog_archive_months():
return Entry.objects.active().dates('published_on', 'month', 'DESC')
|
<commit_before><commit_msg>Add tags for a blog sidebar<commit_after>
|
from django import template
from elephantblog.models import Category, Entry
register = template.Library()
@register.assignment_tag
def elephantblog_categories():
return Category.objects.all()
@register.assignment_tag
def elephantblog_archive_months():
return Entry.objects.active().dates('published_on', 'month', 'DESC')
|
Add tags for a blog sidebarfrom django import template
from elephantblog.models import Category, Entry
register = template.Library()
@register.assignment_tag
def elephantblog_categories():
return Category.objects.all()
@register.assignment_tag
def elephantblog_archive_months():
return Entry.objects.active().dates('published_on', 'month', 'DESC')
|
<commit_before><commit_msg>Add tags for a blog sidebar<commit_after>from django import template
from elephantblog.models import Category, Entry
register = template.Library()
@register.assignment_tag
def elephantblog_categories():
return Category.objects.all()
@register.assignment_tag
def elephantblog_archive_months():
return Entry.objects.active().dates('published_on', 'month', 'DESC')
|
|
a9c53bc97c0e62a959c1115ec61d0a28d71aac68
|
devtools/ci/update-versions.py
|
devtools/ci/update-versions.py
|
from __future__ import print_function
import os
import boto
from boto.s3.key import Key
import msmbuilder.version
if msmbuilder.version.release:
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
BUCKET_NAME = 'msmbuilder.org'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(BUCKET_NAME)
root = 'doc/_build'
versions = json.load(urllib2.urlopen('http://www.msmbuilder.org/versions.json'))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({'version' : msmbuilder.version.short_version, 'latest' : True})
k = Key(bucket)
k.key = 'versions.json'
k.set_contents_from_string(json.dumps(versions))
else:
print("This is not a release.")
|
from __future__ import print_function
import os
import pip
import json
from tempfile import NamedTemporaryFile
import subprocess
from msmbuilder import version
from six.moves.urllib.request import urlopen
if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()):
    raise ImportError('The s3cmd package is required. try $ pip install s3cmd')
URL = 'http://www.msmbuilder.org/versions.json'
BUCKET_NAME = 'msmbuilder.org'
if not version.release:
print("This is not a release.")
exit(0)
versions = json.load(urlopen(URL))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({
'version': version.short_version,
'latest': True})
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
with NamedTemporaryFile('w') as config, NamedTemporaryFile('w') as v:
config.write('''[default]
access_key = {AWS_ACCESS_KEY_ID}
secret_key = {AWS_SECRET_ACCESS_KEY}
'''.format(**os.environ))
json.dump(versions, v)
config.flush()
v.flush()
template = ('s3cmd --config {config} '
'put {vfile} s3://{bucket}/versions.json')
cmd = template.format(
config=config.name,
vfile=v.name,
bucket=BUCKET_NAME)
subprocess.call(cmd.split())
|
Fix script for updating version dropdown
|
Fix script for updating version dropdown
|
Python
|
lgpl-2.1
|
mpharrigan/mixtape,brookehus/msmbuilder,peastman/msmbuilder,peastman/msmbuilder,rafwiewiora/msmbuilder,dr-nate/msmbuilder,dr-nate/msmbuilder,msmbuilder/msmbuilder,peastman/msmbuilder,Eigenstate/msmbuilder,Eigenstate/msmbuilder,msultan/msmbuilder,msultan/msmbuilder,rmcgibbo/msmbuilder,msmbuilder/msmbuilder,msultan/msmbuilder,cxhernandez/msmbuilder,rmcgibbo/msmbuilder,Eigenstate/msmbuilder,stephenliu1989/msmbuilder,dotsdl/msmbuilder,peastman/msmbuilder,cxhernandez/msmbuilder,stephenliu1989/msmbuilder,rafwiewiora/msmbuilder,mpharrigan/mixtape,rafwiewiora/msmbuilder,brookehus/msmbuilder,rmcgibbo/msmbuilder,mpharrigan/mixtape,Eigenstate/msmbuilder,msultan/msmbuilder,cxhernandez/msmbuilder,cxhernandez/msmbuilder,peastman/msmbuilder,msmbuilder/msmbuilder,rafwiewiora/msmbuilder,dotsdl/msmbuilder,cxhernandez/msmbuilder,msultan/msmbuilder,brookehus/msmbuilder,dotsdl/msmbuilder,mpharrigan/mixtape,dotsdl/msmbuilder,rmcgibbo/msmbuilder,rafwiewiora/msmbuilder,stephenliu1989/msmbuilder,stephenliu1989/msmbuilder,msmbuilder/msmbuilder,dr-nate/msmbuilder,Eigenstate/msmbuilder,mpharrigan/mixtape,dr-nate/msmbuilder,brookehus/msmbuilder,dr-nate/msmbuilder,msmbuilder/msmbuilder,brookehus/msmbuilder
|
from __future__ import print_function
import os
import boto
from boto.s3.key import Key
import msmbuilder.version
if msmbuilder.version.release:
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
BUCKET_NAME = 'msmbuilder.org'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(BUCKET_NAME)
root = 'doc/_build'
versions = json.load(urllib2.urlopen('http://www.msmbuilder.org/versions.json'))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({'version' : msmbuilder.version.short_version, 'latest' : True})
k = Key(bucket)
k.key = 'versions.json'
k.set_contents_from_string(json.dumps(versions))
else:
print("This is not a release.")
Fix script for updating version dropdown
|
from __future__ import print_function
import os
import pip
import json
from tempfile import NamedTemporaryFile
import subprocess
from msmbuilder import version
from six.moves.urllib.request import urlopen
if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()):
    raise ImportError('The s3cmd package is required. try $ pip install s3cmd')
URL = 'http://www.msmbuilder.org/versions.json'
BUCKET_NAME = 'msmbuilder.org'
if not version.release:
print("This is not a release.")
exit(0)
versions = json.load(urlopen(URL))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({
'version': version.short_version,
'latest': True})
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
with NamedTemporaryFile('w') as config, NamedTemporaryFile('w') as v:
config.write('''[default]
access_key = {AWS_ACCESS_KEY_ID}
secret_key = {AWS_SECRET_ACCESS_KEY}
'''.format(**os.environ))
json.dump(versions, v)
config.flush()
v.flush()
template = ('s3cmd --config {config} '
'put {vfile} s3://{bucket}/versions.json')
cmd = template.format(
config=config.name,
vfile=v.name,
bucket=BUCKET_NAME)
subprocess.call(cmd.split())
|
<commit_before>from __future__ import print_function
import os
import boto
from boto.s3.key import Key
import msmbuilder.version
if msmbuilder.version.release:
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
BUCKET_NAME = 'msmbuilder.org'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(BUCKET_NAME)
root = 'doc/_build'
versions = json.load(urllib2.urlopen('http://www.msmbuilder.org/versions.json'))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({'version' : msmbuilder.version.short_version, 'latest' : True})
k = Key(bucket)
k.key = 'versions.json'
k.set_contents_from_string(json.dumps(versions))
else:
print("This is not a release.")
<commit_msg>Fix script for updating version dropdown<commit_after>
|
from __future__ import print_function
import os
import pip
import json
from tempfile import NamedTemporaryFile
import subprocess
from msmbuilder import version
from six.moves.urllib.request import urlopen
if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()):
    raise ImportError('The s3cmd package is required. try $ pip install s3cmd')
URL = 'http://www.msmbuilder.org/versions.json'
BUCKET_NAME = 'msmbuilder.org'
if not version.release:
print("This is not a release.")
exit(0)
versions = json.load(urlopen(URL))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({
'version': version.short_version,
'latest': True})
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
with NamedTemporaryFile('w') as config, NamedTemporaryFile('w') as v:
config.write('''[default]
access_key = {AWS_ACCESS_KEY_ID}
secret_key = {AWS_SECRET_ACCESS_KEY}
'''.format(**os.environ))
json.dump(versions, v)
config.flush()
v.flush()
template = ('s3cmd --config {config} '
'put {vfile} s3://{bucket}/versions.json')
cmd = template.format(
config=config.name,
vfile=v.name,
bucket=BUCKET_NAME)
subprocess.call(cmd.split())
|
from __future__ import print_function
import os
import boto
from boto.s3.key import Key
import msmbuilder.version
if msmbuilder.version.release:
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
BUCKET_NAME = 'msmbuilder.org'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(BUCKET_NAME)
root = 'doc/_build'
versions = json.load(urllib2.urlopen('http://www.msmbuilder.org/versions.json'))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({'version' : msmbuilder.version.short_version, 'latest' : True})
k = Key(bucket)
k.key = 'versions.json'
k.set_contents_from_string(json.dumps(versions))
else:
print("This is not a release.")
Fix script for updating version dropdownfrom __future__ import print_function
import os
import pip
import json
from tempfile import NamedTemporaryFile
import subprocess
from msmbuilder import version
from six.moves.urllib.request import urlopen
if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()):
    raise ImportError('The s3cmd package is required. try $ pip install s3cmd')
URL = 'http://www.msmbuilder.org/versions.json'
BUCKET_NAME = 'msmbuilder.org'
if not version.release:
print("This is not a release.")
exit(0)
versions = json.load(urlopen(URL))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({
'version': version.short_version,
'latest': True})
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
with NamedTemporaryFile('w') as config, NamedTemporaryFile('w') as v:
config.write('''[default]
access_key = {AWS_ACCESS_KEY_ID}
secret_key = {AWS_SECRET_ACCESS_KEY}
'''.format(**os.environ))
json.dump(versions, v)
config.flush()
v.flush()
template = ('s3cmd --config {config} '
'put {vfile} s3://{bucket}/versions.json')
cmd = template.format(
config=config.name,
vfile=v.name,
bucket=BUCKET_NAME)
subprocess.call(cmd.split())
|
<commit_before>from __future__ import print_function
import os
import boto
from boto.s3.key import Key
import msmbuilder.version
if msmbuilder.version.release:
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
BUCKET_NAME = 'msmbuilder.org'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-' + BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(BUCKET_NAME)
root = 'doc/_build'
versions = json.load(urllib2.urlopen('http://www.msmbuilder.org/versions.json'))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({'version' : msmbuilder.version.short_version, 'latest' : True})
k = Key(bucket)
k.key = 'versions.json'
k.set_contents_from_string(json.dumps(versions))
else:
print("This is not a release.")
<commit_msg>Fix script for updating version dropdown<commit_after>from __future__ import print_function
import os
import pip
import json
from tempfile import NamedTemporaryFile
import subprocess
from msmbuilder import version
from six.moves.urllib.request import urlopen
if not any(d.project_name == 's3cmd' for d in pip.get_installed_distributions()):
    raise ImportError('The s3cmd package is required. try $ pip install s3cmd')
URL = 'http://www.msmbuilder.org/versions.json'
BUCKET_NAME = 'msmbuilder.org'
if not version.release:
print("This is not a release.")
exit(0)
versions = json.load(urlopen(URL))
# new release so all the others are now old
for i in xrange(len(versions)):
versions[i]['latest'] = False
versions.append({
'version': version.short_version,
'latest': True})
# The secret key is available as a secure environment variable
# on travis-ci to push the build documentation to Amazon S3.
with NamedTemporaryFile('w') as config, NamedTemporaryFile('w') as v:
config.write('''[default]
access_key = {AWS_ACCESS_KEY_ID}
secret_key = {AWS_SECRET_ACCESS_KEY}
'''.format(**os.environ))
json.dump(versions, v)
config.flush()
v.flush()
template = ('s3cmd --config {config} '
'put {vfile} s3://{bucket}/versions.json')
cmd = template.format(
config=config.name,
vfile=v.name,
bucket=BUCKET_NAME)
subprocess.call(cmd.split())
|
e8d5306716cb0d2fbc88e79175098dc17e475e84
|
src/main/scala/find-unused.py
|
src/main/scala/find-unused.py
|
import os
from re import *
def read_file(name):
with open(name, 'r') as f:
return f.read()
usage_regex = compile(r'Module\(\s*new\s+(\w+)')
def usages(file_name):
"""Given a file, find all scala classes it uses"""
if not file_name:
return set()
file = read_file(file_name)
objects = findall(usage_regex, file)
return set(objects)
def to_file(obj):
"""Given a scala class, find the file in which it resides"""
decl_regex = compile(f'class\s+{obj}\(')
dir_files = os.listdir()
possible_filename = f'{obj}.scala'
if possible_filename in dir_files:
return possible_filename
for file_name in dir_files:
file = read_file(file_name)
if findall(decl_regex, file):
return file_name
def find_all(start):
seen = set()
frontier = set()
frontier.add(start)
while frontier:
e = frontier.pop()
seen.add(e)
new = usages(to_file(e)).difference(seen)
frontier.update(new)
return seen
if __name__ == '__main__':
import sys
import itertools
    if len(sys.argv) < 2:
print('Usage: `python find-unused.py <entry class>`')
sys.exit(1)
classes = find_all(sys.argv[1])
files = list(set([e for e in [to_file(c) for c in classes] if e]))
if files:
files.sort()
print('# Used files:')
for file in files:
print(f' - {file}')
dir_files = set(os.listdir())
used_files = set(files)
diff = list(dir_files.difference(used_files))
if diff:
diff.sort()
print('\n# Unused files:')
for file in diff:
if '.scala' in file:
print(f' - {file}')
|
Add script for finding unused files
|
Add script for finding unused files
|
Python
|
mit
|
martinhath/bnn
|
Add script for finding unused files
|
import os
from re import *
def read_file(name):
with open(name, 'r') as f:
return f.read()
usage_regex = compile(r'Module\(\s*new\s+(\w+)')
def usages(file_name):
"""Given a file, find all scala classes it uses"""
if not file_name:
return set()
file = read_file(file_name)
objects = findall(usage_regex, file)
return set(objects)
def to_file(obj):
"""Given a scala class, find the file in which it resides"""
decl_regex = compile(f'class\s+{obj}\(')
dir_files = os.listdir()
possible_filename = f'{obj}.scala'
if possible_filename in dir_files:
return possible_filename
for file_name in dir_files:
file = read_file(file_name)
if findall(decl_regex, file):
return file_name
def find_all(start):
seen = set()
frontier = set()
frontier.add(start)
while frontier:
e = frontier.pop()
seen.add(e)
new = usages(to_file(e)).difference(seen)
frontier.update(new)
return seen
if __name__ == '__main__':
import sys
import itertools
    if len(sys.argv) < 2:
print('Usage: `python find-unused.py <entry class>`')
sys.exit(1)
classes = find_all(sys.argv[1])
files = list(set([e for e in [to_file(c) for c in classes] if e]))
if files:
files.sort()
print('# Used files:')
for file in files:
print(f' - {file}')
dir_files = set(os.listdir())
used_files = set(files)
diff = list(dir_files.difference(used_files))
if diff:
diff.sort()
print('\n# Unused files:')
for file in diff:
if '.scala' in file:
print(f' - {file}')
|
<commit_before><commit_msg>Add script for finding unused files<commit_after>
|
import os
from re import *
def read_file(name):
with open(name, 'r') as f:
return f.read()
usage_regex = compile(r'Module\(\s*new\s+(\w+)')
def usages(file_name):
"""Given a file, find all scala classes it uses"""
if not file_name:
return set()
file = read_file(file_name)
objects = findall(usage_regex, file)
return set(objects)
def to_file(obj):
"""Given a scala class, find the file in which it resides"""
decl_regex = compile(f'class\s+{obj}\(')
dir_files = os.listdir()
possible_filename = f'{obj}.scala'
if possible_filename in dir_files:
return possible_filename
for file_name in dir_files:
file = read_file(file_name)
if findall(decl_regex, file):
return file_name
def find_all(start):
seen = set()
frontier = set()
frontier.add(start)
while frontier:
e = frontier.pop()
seen.add(e)
new = usages(to_file(e)).difference(seen)
frontier.update(new)
return seen
if __name__ == '__main__':
import sys
import itertools
    if len(sys.argv) < 2:
print('Usage: `python find-unused.py <entry class>`')
sys.exit(1)
classes = find_all(sys.argv[1])
files = list(set([e for e in [to_file(c) for c in classes] if e]))
if files:
files.sort()
print('# Used files:')
for file in files:
print(f' - {file}')
dir_files = set(os.listdir())
used_files = set(files)
diff = list(dir_files.difference(used_files))
if diff:
diff.sort()
print('\n# Unused files:')
for file in diff:
if '.scala' in file:
print(f' - {file}')
|
Add script for finding unused filesimport os
from re import *
def read_file(name):
with open(name, 'r') as f:
return f.read()
usage_regex = compile(r'Module\(\s*new\s+(\w+)')
def usages(file_name):
"""Given a file, find all scala classes it uses"""
if not file_name:
return set()
file = read_file(file_name)
objects = findall(usage_regex, file)
return set(objects)
def to_file(obj):
"""Given a scala class, find the file in which it resides"""
decl_regex = compile(f'class\s+{obj}\(')
dir_files = os.listdir()
possible_filename = f'{obj}.scala'
if possible_filename in dir_files:
return possible_filename
for file_name in dir_files:
file = read_file(file_name)
if findall(decl_regex, file):
return file_name
def find_all(start):
seen = set()
frontier = set()
frontier.add(start)
while frontier:
e = frontier.pop()
seen.add(e)
new = usages(to_file(e)).difference(seen)
frontier.update(new)
return seen
if __name__ == '__main__':
import sys
import itertools
    if len(sys.argv) < 2:
print('Usage: `python find-unused.py <entry class>`')
sys.exit(1)
classes = find_all(sys.argv[1])
files = list(set([e for e in [to_file(c) for c in classes] if e]))
if files:
files.sort()
print('# Used files:')
for file in files:
print(f' - {file}')
dir_files = set(os.listdir())
used_files = set(files)
diff = list(dir_files.difference(used_files))
if diff:
diff.sort()
print('\n# Unused files:')
for file in diff:
if '.scala' in file:
print(f' - {file}')
|
<commit_before><commit_msg>Add script for finding unused files<commit_after>import os
from re import *
def read_file(name):
with open(name, 'r') as f:
return f.read()
usage_regex = compile(r'Module\(\s*new\s+(\w+)')
def usages(file_name):
"""Given a file, find all scala classes it uses"""
if not file_name:
return set()
file = read_file(file_name)
objects = findall(usage_regex, file)
return set(objects)
def to_file(obj):
"""Given a scala class, find the file in which it resides"""
decl_regex = compile(f'class\s+{obj}\(')
dir_files = os.listdir()
possible_filename = f'{obj}.scala'
if possible_filename in dir_files:
return possible_filename
for file_name in dir_files:
file = read_file(file_name)
if findall(decl_regex, file):
return file_name
def find_all(start):
seen = set()
frontier = set()
frontier.add(start)
while frontier:
e = frontier.pop()
seen.add(e)
new = usages(to_file(e)).difference(seen)
frontier.update(new)
return seen
if __name__ == '__main__':
import sys
import itertools
    if len(sys.argv) < 2:
print('Usage: `python find-unused.py <entry class>`')
sys.exit(1)
classes = find_all(sys.argv[1])
files = list(set([e for e in [to_file(c) for c in classes] if e]))
if files:
files.sort()
print('# Used files:')
for file in files:
print(f' - {file}')
dir_files = set(os.listdir())
used_files = set(files)
diff = list(dir_files.difference(used_files))
if diff:
diff.sort()
print('\n# Unused files:')
for file in diff:
if '.scala' in file:
print(f' - {file}')
|
|
6a9ef33a996b40ca67b5b1d894c90396a8e5b158
|
busbus/provider/lawrenceks.py
|
busbus/provider/lawrenceks.py
|
from busbus.provider import ProviderBase
from busbus.provider.gtfs import GTFSMixin
class LawrenceTransitProvider(GTFSMixin, ProviderBase):
gtfs_url = ("http://lawrenceks.org/assets/gis/google-transit/"
"google_transit.zip")
def __init__(self, engine=None):
super(LawrenceTransitProvider, self).__init__(engine, self.gtfs_url)
|
Add Lawrence Transit (KS) GTFS provider
|
Add Lawrence Transit (KS) GTFS provider
|
Python
|
mit
|
spaceboats/busbus
|
Add Lawrence Transit (KS) GTFS provider
|
from busbus.provider import ProviderBase
from busbus.provider.gtfs import GTFSMixin
class LawrenceTransitProvider(GTFSMixin, ProviderBase):
gtfs_url = ("http://lawrenceks.org/assets/gis/google-transit/"
"google_transit.zip")
def __init__(self, engine=None):
super(LawrenceTransitProvider, self).__init__(engine, self.gtfs_url)
|
<commit_before><commit_msg>Add Lawrence Transit (KS) GTFS provider<commit_after>
|
from busbus.provider import ProviderBase
from busbus.provider.gtfs import GTFSMixin
class LawrenceTransitProvider(GTFSMixin, ProviderBase):
gtfs_url = ("http://lawrenceks.org/assets/gis/google-transit/"
"google_transit.zip")
def __init__(self, engine=None):
super(LawrenceTransitProvider, self).__init__(engine, self.gtfs_url)
|
Add Lawrence Transit (KS) GTFS providerfrom busbus.provider import ProviderBase
from busbus.provider.gtfs import GTFSMixin
class LawrenceTransitProvider(GTFSMixin, ProviderBase):
gtfs_url = ("http://lawrenceks.org/assets/gis/google-transit/"
"google_transit.zip")
def __init__(self, engine=None):
super(LawrenceTransitProvider, self).__init__(engine, self.gtfs_url)
|
<commit_before><commit_msg>Add Lawrence Transit (KS) GTFS provider<commit_after>from busbus.provider import ProviderBase
from busbus.provider.gtfs import GTFSMixin
class LawrenceTransitProvider(GTFSMixin, ProviderBase):
gtfs_url = ("http://lawrenceks.org/assets/gis/google-transit/"
"google_transit.zip")
def __init__(self, engine=None):
super(LawrenceTransitProvider, self).__init__(engine, self.gtfs_url)
|
|
61b1410af66bbdbdc3ac2bb222b43a2aebc9beb8
|
CodeFights/magicalWell.py
|
CodeFights/magicalWell.py
|
#!/usr/local/bin/python
# Code Fights Magical Well Problem
def magicalWell(a, b, n):
return sum([x * y for (x, y) in zip(range(a, a + n), range(b, b + n))])
def main():
tests = [
[1, 2, 2, 8],
[1, 1, 1, 1],
[6, 5, 3, 128]
]
for t in tests:
res = magicalWell(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: magicalWell({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: magicalWell({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights magical well problem
|
Solve Code Fights magical well problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights magical well problem
|
#!/usr/local/bin/python
# Code Fights Magical Well Problem
def magicalWell(a, b, n):
return sum([x * y for (x, y) in zip(range(a, a + n), range(b, b + n))])
def main():
tests = [
[1, 2, 2, 8],
[1, 1, 1, 1],
[6, 5, 3, 128]
]
for t in tests:
res = magicalWell(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: magicalWell({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: magicalWell({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights magical well problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Magical Well Problem
def magicalWell(a, b, n):
return sum([x * y for (x, y) in zip(range(a, a + n), range(b, b + n))])
def main():
tests = [
[1, 2, 2, 8],
[1, 1, 1, 1],
[6, 5, 3, 128]
]
for t in tests:
res = magicalWell(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: magicalWell({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: magicalWell({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights magical well problem#!/usr/local/bin/python
# Code Fights Magical Well Problem
def magicalWell(a, b, n):
return sum([x * y for (x, y) in zip(range(a, a + n), range(b, b + n))])
def main():
tests = [
[1, 2, 2, 8],
[1, 1, 1, 1],
[6, 5, 3, 128]
]
for t in tests:
res = magicalWell(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: magicalWell({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: magicalWell({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights magical well problem<commit_after>#!/usr/local/bin/python
# Code Fights Magical Well Problem
def magicalWell(a, b, n):
return sum([x * y for (x, y) in zip(range(a, a + n), range(b, b + n))])
def main():
tests = [
[1, 2, 2, 8],
[1, 1, 1, 1],
[6, 5, 3, 128]
]
for t in tests:
res = magicalWell(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: magicalWell({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: magicalWell({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
|
450a670635d676005e598ae3a3d96eb5d80756af
|
leetcode/algorithms/edit-distance/solution.py
|
leetcode/algorithms/edit-distance/solution.py
|
#!/usr/bin/env python
class Solution(object):
def minDistance(self, a, b):
"""
Returns the edit distance between strings a and b.
"""
n = len(a)
m = len(b)
# If either string is empty, we need to add all characters from other
# string.
if n == 0 or m == 0:
return max(n, m)
# n x m matrix where each dp[i][j] represents the edit distance for
# a[:i + 1] and b[:j + 1].
dp = [([0] * (m + 1)) for i in range(0, n + 1)]
for i in range(0, n + 1):
for j in range(0, m + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif a[i - 1] == b[j - 1]:
# If the trailing characters are the same, we don't need to
# perform an operation to bring these characters in sync.
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + \
min(dp[i - 1][j - 1], # Replace a[i] with b[j]
dp[i][j - 1], # Add a[i] to b[:j] (Insert)
dp[i - 1][j]) # Add b[j] to a[:i] (Delete)
return dp[n][m]
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/edit-distance/')
if __name__ == '__main__':
main()
|
Implement Edit Distance in Python
|
Implement Edit Distance in Python
|
Python
|
mit
|
andreimaximov/algorithms,andreimaximov/algorithms,andreimaximov/algorithms,andreimaximov/algorithms
|
Implement Edit Distance in Python
|
#!/usr/bin/env python
class Solution(object):
def minDistance(self, a, b):
"""
Returns the edit distance between strings a and b.
"""
n = len(a)
m = len(b)
# If either string is empty, we need to add all characters from other
# string.
if n == 0 or m == 0:
return max(n, m)
# n x m matrix where each dp[i][j] represents the edit distance for
# a[:i + 1] and b[:j + 1].
dp = [([0] * (m + 1)) for i in range(0, n + 1)]
for i in range(0, n + 1):
for j in range(0, m + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif a[i - 1] == b[j - 1]:
# If the trailing characters are the same, we don't need to
# perform an operation to bring these characters in sync.
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + \
min(dp[i - 1][j - 1], # Replace a[i] with b[j]
dp[i][j - 1], # Add a[i] to b[:j] (Insert)
dp[i - 1][j]) # Add b[j] to a[:i] (Delete)
return dp[n][m]
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/edit-distance/')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Implement Edit Distance in Python<commit_after>
|
#!/usr/bin/env python
class Solution(object):
def minDistance(self, a, b):
"""
Returns the edit distance between strings a and b.
"""
n = len(a)
m = len(b)
# If either string is empty, we need to add all characters from other
# string.
if n == 0 or m == 0:
return max(n, m)
# n x m matrix where each dp[i][j] represents the edit distance for
# a[:i + 1] and b[:j + 1].
dp = [([0] * (m + 1)) for i in range(0, n + 1)]
for i in range(0, n + 1):
for j in range(0, m + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif a[i - 1] == b[j - 1]:
# If the trailing characters are the same, we don't need to
# perform an operation to bring these characters in sync.
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + \
min(dp[i - 1][j - 1], # Replace a[i] with b[j]
dp[i][j - 1], # Add a[i] to b[:j] (Insert)
dp[i - 1][j]) # Add b[j] to a[:i] (Delete)
return dp[n][m]
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/edit-distance/')
if __name__ == '__main__':
main()
|
Implement Edit Distance in Python#!/usr/bin/env python
class Solution(object):
def minDistance(self, a, b):
"""
Returns the edit distance between strings a and b.
"""
n = len(a)
m = len(b)
# If either string is empty, we need to add all characters from other
# string.
if n == 0 or m == 0:
return max(n, m)
# n x m matrix where each dp[i][j] represents the edit distance for
# a[:i + 1] and b[:j + 1].
dp = [([0] * (m + 1)) for i in range(0, n + 1)]
for i in range(0, n + 1):
for j in range(0, m + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif a[i - 1] == b[j - 1]:
# If the trailing characters are the same, we don't need to
# perform an operation to bring these characters in sync.
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + \
min(dp[i - 1][j - 1], # Replace a[i] with b[j]
dp[i][j - 1], # Add a[i] to b[:j] (Insert)
dp[i - 1][j]) # Add b[j] to a[:i] (Delete)
return dp[n][m]
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/edit-distance/')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Implement Edit Distance in Python<commit_after>#!/usr/bin/env python
class Solution(object):
def minDistance(self, a, b):
"""
Returns the edit distance between strings a and b.
"""
n = len(a)
m = len(b)
# If either string is empty, we need to add all characters from other
# string.
if n == 0 or m == 0:
return max(n, m)
# n x m matrix where each dp[i][j] represents the edit distance for
# a[:i + 1] and b[:j + 1].
dp = [([0] * (m + 1)) for i in range(0, n + 1)]
for i in range(0, n + 1):
for j in range(0, m + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif a[i - 1] == b[j - 1]:
# If the trailing characters are the same, we don't need to
# perform an operation to bring these characters in sync.
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + \
min(dp[i - 1][j - 1], # Replace a[i] with b[j]
dp[i][j - 1], # Add a[i] to b[:j] (Insert)
dp[i - 1][j]) # Add b[j] to a[:i] (Delete)
return dp[n][m]
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/edit-distance/')
if __name__ == '__main__':
main()
|
|
129179d17e1ce1f866a57ef73a641abb9caccb03
|
dash-indexer.py
|
dash-indexer.py
|
#!/usr/bin/env python3
import argparse
from collections import defaultdict
from itertools import count
from ts import TSPacket
from isobmff import SidxBox, SidxReference, StypBox
def index_media_segment(media_file_name, template, force, verbose):
random_access_points = defaultdict(list)
if verbose:
print("Reading media file", media_file_name)
with open(media_file_name, "rb") as f:
for byte_offset in count(step=TSPacket.SIZE):
ts_data = f.read(TSPacket.SIZE)
if not ts_data:
break
ts_packet = TSPacket(ts_data)
if ts_packet.random_access_indicator:
if verbose:
print("Found TS packet with random_access_indicator = 1 "
"at byte offset", byte_offset, "for PID",
ts_packet.pid)
random_access_points[ts_packet.pid].append(byte_offset)
boxes = [StypBox("sisx")]
for pid, byte_offsets in random_access_points.items():
sidx = SidxBox()
sidx.reference_id = pid
previous_start = None
for byte_offset in byte_offsets:
if previous_start is not None:
reference = SidxReference(SidxReference.ReferenceType.MEDIA)
reference.referenced_size = byte_offset - previous_start
sidx.references.append(reference)
previous_start = byte_offset
boxes.append(sidx)
if verbose:
print("Writing boxes:")
for box in boxes:
print(box)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("media_segment", help="The media segment to index.")
parser.add_argument("--template", "-t", help="Template for segment index "
"files. {s} will be replaced with the name of the media segment minus "
"the suffix (.ts).",
default="{s}.sidx")
parser.add_argument("--force", "-f", action="store_true", default=False,
help="Overwrite output files without prompting.")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Enable verbose output.")
args = parser.parse_args()
index_media_segment(args.media_segment, args.template, args.force,
args.verbose)
|
Write initial code to walk through the MPEG-TS file and find byte offsets.
|
Write initial code to walk through the MPEG-TS file and find byte offsets.
|
Python
|
bsd-2-clause
|
brendanlong/dash-ts-tools,brendanlong/dash-ts-tools
|
Write initial code to walk through the MPEG-TS file and find byte offsets.
|
#!/usr/bin/env python3
import argparse
from collections import defaultdict
from itertools import count
from ts import TSPacket
from isobmff import SidxBox, SidxReference, StypBox
def index_media_segment(media_file_name, template, force, verbose):
random_access_points = defaultdict(list)
if verbose:
print("Reading media file", media_file_name)
with open(media_file_name, "rb") as f:
for byte_offset in count(step=TSPacket.SIZE):
ts_data = f.read(TSPacket.SIZE)
if not ts_data:
break
ts_packet = TSPacket(ts_data)
if ts_packet.random_access_indicator:
if verbose:
print("Found TS packet with random_access_indicator = 1 "
"at byte offset", byte_offset, "for PID",
ts_packet.pid)
random_access_points[ts_packet.pid].append(byte_offset)
boxes = [StypBox("sisx")]
for pid, byte_offsets in random_access_points.items():
sidx = SidxBox()
sidx.reference_id = pid
previous_start = None
for byte_offset in byte_offsets:
if previous_start is not None:
reference = SidxReference(SidxReference.ReferenceType.MEDIA)
reference.referenced_size = byte_offset - previous_start
sidx.references.append(reference)
previous_start = byte_offset
boxes.append(sidx)
if verbose:
print("Writing boxes:")
for box in boxes:
print(box)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("media_segment", help="The media segment to index.")
parser.add_argument("--template", "-t", help="Template for segment index "
"files. {s} will be replaced with the name of the media segment minus "
"the suffix (.ts).",
default="{s}.sidx")
parser.add_argument("--force", "-f", action="store_true", default=False,
help="Overwrite output files without prompting.")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Enable verbose output.")
args = parser.parse_args()
index_media_segment(args.media_segment, args.template, args.force,
args.verbose)
|
<commit_before><commit_msg>Write initial code to walk through the MPEG-TS file and find byte offsets.<commit_after>
|
#!/usr/bin/env python3
import argparse
from collections import defaultdict
from itertools import count
from ts import TSPacket
from isobmff import SidxBox, SidxReference, StypBox
def index_media_segment(media_file_name, template, force, verbose):
random_access_points = defaultdict(list)
if verbose:
print("Reading media file", media_file_name)
with open(media_file_name, "rb") as f:
for byte_offset in count(step=TSPacket.SIZE):
ts_data = f.read(TSPacket.SIZE)
if not ts_data:
break
ts_packet = TSPacket(ts_data)
if ts_packet.random_access_indicator:
if verbose:
print("Found TS packet with random_access_indicator = 1 "
"at byte offset", byte_offset, "for PID",
ts_packet.pid)
random_access_points[ts_packet.pid].append(byte_offset)
boxes = [StypBox("sisx")]
for pid, byte_offsets in random_access_points.items():
sidx = SidxBox()
sidx.reference_id = pid
previous_start = None
for byte_offset in byte_offsets:
if previous_start is not None:
reference = SidxReference(SidxReference.ReferenceType.MEDIA)
reference.referenced_size = byte_offset - previous_start
sidx.references.append(reference)
previous_start = byte_offset
boxes.append(sidx)
if verbose:
print("Writing boxes:")
for box in boxes:
print(box)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("media_segment", help="The media segment to index.")
parser.add_argument("--template", "-t", help="Template for segment index "
"files. {s} will be replaced with the name of the media segment minus "
"the suffix (.ts).",
default="{s}.sidx")
parser.add_argument("--force", "-f", action="store_true", default=False,
help="Overwrite output files without prompting.")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Enable verbose output.")
args = parser.parse_args()
index_media_segment(args.media_segment, args.template, args.force,
args.verbose)
|
Write initial code to walk through the MPEG-TS file and find byte offsets.#!/usr/bin/env python3
import argparse
from collections import defaultdict
from itertools import count
from ts import TSPacket
from isobmff import SidxBox, SidxReference, StypBox
def index_media_segment(media_file_name, template, force, verbose):
random_access_points = defaultdict(list)
if verbose:
print("Reading media file", media_file_name)
with open(media_file_name, "rb") as f:
for byte_offset in count(step=TSPacket.SIZE):
ts_data = f.read(TSPacket.SIZE)
if not ts_data:
break
ts_packet = TSPacket(ts_data)
if ts_packet.random_access_indicator:
if verbose:
print("Found TS packet with random_access_indicator = 1 "
"at byte offset", byte_offset, "for PID",
ts_packet.pid)
random_access_points[ts_packet.pid].append(byte_offset)
boxes = [StypBox("sisx")]
for pid, byte_offsets in random_access_points.items():
sidx = SidxBox()
sidx.reference_id = pid
previous_start = None
for byte_offset in byte_offsets:
if previous_start is not None:
reference = SidxReference(SidxReference.ReferenceType.MEDIA)
reference.referenced_size = byte_offset - previous_start
sidx.references.append(reference)
previous_start = byte_offset
boxes.append(sidx)
if verbose:
print("Writing boxes:")
for box in boxes:
print(box)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("media_segment", help="The media segment to index.")
parser.add_argument("--template", "-t", help="Template for segment index "
"files. {s} will be replaced with the name of the media segment minus "
"the suffix (.ts).",
default="{s}.sidx")
parser.add_argument("--force", "-f", action="store_true", default=False,
help="Overwrite output files without prompting.")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Enable verbose output.")
args = parser.parse_args()
index_media_segment(args.media_segment, args.template, args.force,
args.verbose)
|
<commit_before><commit_msg>Write initial code to walk through the MPEG-TS file and find byte offsets.<commit_after>#!/usr/bin/env python3
import argparse
from collections import defaultdict
from itertools import count
from ts import TSPacket
from isobmff import SidxBox, SidxReference, StypBox
def index_media_segment(media_file_name, template, force, verbose):
random_access_points = defaultdict(list)
if verbose:
print("Reading media file", media_file_name)
with open(media_file_name, "rb") as f:
for byte_offset in count(step=TSPacket.SIZE):
ts_data = f.read(TSPacket.SIZE)
if not ts_data:
break
ts_packet = TSPacket(ts_data)
if ts_packet.random_access_indicator:
if verbose:
print("Found TS packet with random_access_indicator = 1 "
"at byte offset", byte_offset, "for PID",
ts_packet.pid)
random_access_points[ts_packet.pid].append(byte_offset)
boxes = [StypBox("sisx")]
for pid, byte_offsets in random_access_points.items():
sidx = SidxBox()
sidx.reference_id = pid
previous_start = None
for byte_offset in byte_offsets:
if previous_start is not None:
reference = SidxReference(SidxReference.ReferenceType.MEDIA)
reference.referenced_size = byte_offset - previous_start
sidx.references.append(reference)
previous_start = byte_offset
boxes.append(sidx)
if verbose:
print("Writing boxes:")
for box in boxes:
print(box)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("media_segment", help="The media segment to index.")
parser.add_argument("--template", "-t", help="Template for segment index "
"files. {s} will be replaced with the name of the media segment minus "
"the suffix (.ts).",
default="{s}.sidx")
parser.add_argument("--force", "-f", action="store_true", default=False,
help="Overwrite output files without prompting.")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Enable verbose output.")
args = parser.parse_args()
index_media_segment(args.media_segment, args.template, args.force,
args.verbose)
|
|
f98a20977c359189b506f765d6c2493fda7718a9
|
scripts/tests/off-center-rotation.py
|
scripts/tests/off-center-rotation.py
|
from paraview.simple import *
import tonic
from tonic import paraview as pv
dataset_destination_path = '/Users/seb/spherical'
# Initial ParaView scene setup
Cone(Center=[2,4,8])
Show()
view = Render()
view.CameraFocalPoint = [2,4,8]
view.CameraPosition = [2,4,0]
view.CenterOfRotation = [2,4,8]
view.CameraViewUp = [0,1,0]
view = Render()
# Choose data location
dh = tonic.DataHandler(dataset_destination_path)
camera = pv.create_spherical_camera(view, dh, range(0, 360, 30), range(-60, 61, 30))
# Create data
dh.registerData(name='image', type='blob', mimeType='image/png', fileName='.png')
# Loop over data
for pos in camera:
pv.update_camera(view, pos)
WriteImage(dh.getDataAbsoluteFilePath('image'))
# Write metadata
dh.writeDataDescriptor()
|
Add a test for spherical camera with off center object
|
test(CenterOfRotation): Add a test for spherical camera with off center object
|
Python
|
bsd-3-clause
|
Kitware/tonic-data-generator,Kitware/tonic-data-generator
|
test(CenterOfRotation): Add a test for spherical camera with off center object
|
from paraview.simple import *
import tonic
from tonic import paraview as pv
dataset_destination_path = '/Users/seb/spherical'
# Initial ParaView scene setup
Cone(Center=[2,4,8])
Show()
view = Render()
view.CameraFocalPoint = [2,4,8]
view.CameraPosition = [2,4,0]
view.CenterOfRotation = [2,4,8]
view.CameraViewUp = [0,1,0]
view = Render()
# Choose data location
dh = tonic.DataHandler(dataset_destination_path)
camera = pv.create_spherical_camera(view, dh, range(0, 360, 30), range(-60, 61, 30))
# Create data
dh.registerData(name='image', type='blob', mimeType='image/png', fileName='.png')
# Loop over data
for pos in camera:
pv.update_camera(view, pos)
WriteImage(dh.getDataAbsoluteFilePath('image'))
# Write metadata
dh.writeDataDescriptor()
|
<commit_before><commit_msg>test(CenterOfRotation): Add a test for spherical camera with off center object<commit_after>
|
from paraview.simple import *
import tonic
from tonic import paraview as pv
dataset_destination_path = '/Users/seb/spherical'
# Initial ParaView scene setup
Cone(Center=[2,4,8])
Show()
view = Render()
view.CameraFocalPoint = [2,4,8]
view.CameraPosition = [2,4,0]
view.CenterOfRotation = [2,4,8]
view.CameraViewUp = [0,1,0]
view = Render()
# Choose data location
dh = tonic.DataHandler(dataset_destination_path)
camera = pv.create_spherical_camera(view, dh, range(0, 360, 30), range(-60, 61, 30))
# Create data
dh.registerData(name='image', type='blob', mimeType='image/png', fileName='.png')
# Loop over data
for pos in camera:
pv.update_camera(view, pos)
WriteImage(dh.getDataAbsoluteFilePath('image'))
# Write metadata
dh.writeDataDescriptor()
|
test(CenterOfRotation): Add a test for spherical camera with off center objectfrom paraview.simple import *
import tonic
from tonic import paraview as pv
dataset_destination_path = '/Users/seb/spherical'
# Initial ParaView scene setup
Cone(Center=[2,4,8])
Show()
view = Render()
view.CameraFocalPoint = [2,4,8]
view.CameraPosition = [2,4,0]
view.CenterOfRotation = [2,4,8]
view.CameraViewUp = [0,1,0]
view = Render()
# Choose data location
dh = tonic.DataHandler(dataset_destination_path)
camera = pv.create_spherical_camera(view, dh, range(0, 360, 30), range(-60, 61, 30))
# Create data
dh.registerData(name='image', type='blob', mimeType='image/png', fileName='.png')
# Loop over data
for pos in camera:
pv.update_camera(view, pos)
WriteImage(dh.getDataAbsoluteFilePath('image'))
# Write metadata
dh.writeDataDescriptor()
|
<commit_before><commit_msg>test(CenterOfRotation): Add a test for spherical camera with off center object<commit_after>from paraview.simple import *
import tonic
from tonic import paraview as pv
dataset_destination_path = '/Users/seb/spherical'
# Initial ParaView scene setup
Cone(Center=[2,4,8])
Show()
view = Render()
view.CameraFocalPoint = [2,4,8]
view.CameraPosition = [2,4,0]
view.CenterOfRotation = [2,4,8]
view.CameraViewUp = [0,1,0]
view = Render()
# Choose data location
dh = tonic.DataHandler(dataset_destination_path)
camera = pv.create_spherical_camera(view, dh, range(0, 360, 30), range(-60, 61, 30))
# Create data
dh.registerData(name='image', type='blob', mimeType='image/png', fileName='.png')
# Loop over data
for pos in camera:
pv.update_camera(view, pos)
WriteImage(dh.getDataAbsoluteFilePath('image'))
# Write metadata
dh.writeDataDescriptor()
|
|
d752c73ec278eb36005dfe5ca62596e111970614
|
test/integration/ggrc/services/test_custom_attributes.py
|
test/integration/ggrc/services/test_custom_attributes.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for PUT and POST requests for objects with custom attributes
These tests include:
- Creating an object with custom attributes (POST request).
- Editing existing custom attributes on an object.
- Adding custom attributes to existing object.
"""
from ggrc import utils
from ggrc import models
from ggrc import builder
from integration.ggrc import services
from integration.ggrc.generator import ObjectGenerator
class TestGlobalCustomAttributes(services.TestCase):
def setUp(self):
services.TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def _post(self, data):
return self.client.post(
"/api/products",
content_type='application/json',
data=utils.as_json(data),
headers={'X-Requested-By': 'Unit Tests'},
)
def test_custom_attribute_post(self):
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
import ipdb; ipdb.set_trace()
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"my custom attribute value"
)
|
Add tests for new CA post api
|
Add tests for new CA post api
|
Python
|
apache-2.0
|
josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core
|
Add tests for new CA post api
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for PUT and POST requests for objects with custom attributes
These tests include:
- Creating an object with custom attributes (POST request).
- Editing existing custom attributes on an object.
- Adding custom attributes to existing object.
"""
from ggrc import utils
from ggrc import models
from ggrc import builder
from integration.ggrc import services
from integration.ggrc.generator import ObjectGenerator
class TestGlobalCustomAttributes(services.TestCase):
def setUp(self):
services.TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def _post(self, data):
return self.client.post(
"/api/products",
content_type='application/json',
data=utils.as_json(data),
headers={'X-Requested-By': 'Unit Tests'},
)
def test_custom_attribute_post(self):
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
import ipdb; ipdb.set_trace()
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"my custom attribute value"
)
|
<commit_before><commit_msg>Add tests for new CA post api<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for PUT and POST requests for objects with custom attributes
These tests include:
- Creating an object with custom attributes (POST request).
- Editing existing custom attributes on an object.
- Adding custom attributes to existing object.
"""
from ggrc import utils
from ggrc import models
from ggrc import builder
from integration.ggrc import services
from integration.ggrc.generator import ObjectGenerator
class TestGlobalCustomAttributes(services.TestCase):
def setUp(self):
services.TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def _post(self, data):
return self.client.post(
"/api/products",
content_type='application/json',
data=utils.as_json(data),
headers={'X-Requested-By': 'Unit Tests'},
)
def test_custom_attribute_post(self):
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
import ipdb; ipdb.set_trace()
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"my custom attribute value"
)
|
Add tests for new CA post api# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for PUT and POST requests for objects with custom attributes
These tests include:
- Creating an object with custom attributes (POST request).
- Editing existing custom attributes on an object.
- Adding custom attributes to existing object.
"""
from ggrc import utils
from ggrc import models
from ggrc import builder
from integration.ggrc import services
from integration.ggrc.generator import ObjectGenerator
class TestGlobalCustomAttributes(services.TestCase):
def setUp(self):
services.TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def _post(self, data):
return self.client.post(
"/api/products",
content_type='application/json',
data=utils.as_json(data),
headers={'X-Requested-By': 'Unit Tests'},
)
def test_custom_attribute_post(self):
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
import ipdb; ipdb.set_trace()
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"my custom attribute value"
)
|
<commit_before><commit_msg>Add tests for new CA post api<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for PUT and POST requests for objects with custom attributes
These tests include:
- Creating an object with custom attributes (POST request).
- Editing existing custom attributes on an object.
- Adding custom attributes to existing object.
"""
from ggrc import utils
from ggrc import models
from ggrc import builder
from integration.ggrc import services
from integration.ggrc.generator import ObjectGenerator
class TestGlobalCustomAttributes(services.TestCase):
def setUp(self):
services.TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def _post(self, data):
return self.client.post(
"/api/products",
content_type='application/json',
data=utils.as_json(data),
headers={'X-Requested-By': 'Unit Tests'},
)
def test_custom_attribute_post(self):
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
import ipdb; ipdb.set_trace()
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"my custom attribute value"
)
|
|
c9b21c0076ee00364a7bc94f44671612402211f0
|
crt_sh.py
|
crt_sh.py
|
import urllib
import requests
from bs4 import BeautifulSoup
def req_crtsh(search_string):
subdomain_list = []
base_url = "https://crt.sh/?q=%25." + search_string
print("[+] Requesting URL %s" % base_url)
r = requests.get(url=base_url, headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'})
if r.ok:
soup = BeautifulSoup(r.content, 'lxml')
try:
table = soup.findAll('table')[2]
rows = table.find_all(['tr'])
for row in rows:
cells = row.find_all('td', limit=5)
if cells:
name = cells[3].text
subdomain_list.append(name)
# Remove duplicate domains from list
subdomain_list = list(set(subdomain_list))
remove_wildcard_list = []
for x in subdomain_list:
if "*." not in x:
remove_wildcard_list.append(x)
else:
print("[!] Detected wildcard domain %s" % x)
print("[!] Removing from subdomain list!")
# Debug: print all subdomains in list
for domain in remove_wildcard_list:
print("[+] Found SSL cert for subdomain: %s" % domain)
return remove_wildcard_list
except:
print("error retriving information")
req_crtsh("digg.com")
|
Add subdomain check using crt.sh
|
Add subdomain check using crt.sh
* Removes wildcard
* Gets all ssl certs for possible subdomains
|
Python
|
unlicense
|
nethunteros/punter
|
Add subdomain check using crt.sh
* Removes wildcard
* Gets all ssl certs for possible subdomains
|
import urllib
import requests
from bs4 import BeautifulSoup
def req_crtsh(search_string):
subdomain_list = []
base_url = "https://crt.sh/?q=%25." + search_string
print("[+] Requesting URL %s" % base_url)
r = requests.get(url=base_url, headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'})
if r.ok:
soup = BeautifulSoup(r.content, 'lxml')
try:
table = soup.findAll('table')[2]
rows = table.find_all(['tr'])
for row in rows:
cells = row.find_all('td', limit=5)
if cells:
name = cells[3].text
subdomain_list.append(name)
# Remove duplicate domains from list
subdomain_list = list(set(subdomain_list))
remove_wildcard_list = []
for x in subdomain_list:
if "*." not in x:
remove_wildcard_list.append(x)
else:
print("[!] Detected wildcard domain %s" % x)
print("[!] Removing from subdomain list!")
# Debug: print all subdomains in list
for domain in remove_wildcard_list:
print("[+] Found SSL cert for subdomain: %s" % domain)
return remove_wildcard_list
except:
print("error retriving information")
req_crtsh("digg.com")
|
<commit_before><commit_msg>Add subdomain check using crt.sh
* Removes wildcard
* Gets all ssl certs for possible subdomains<commit_after>
|
import urllib
import requests
from bs4 import BeautifulSoup
def req_crtsh(search_string):
subdomain_list = []
base_url = "https://crt.sh/?q=%25." + search_string
print("[+] Requesting URL %s" % base_url)
r = requests.get(url=base_url, headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'})
if r.ok:
soup = BeautifulSoup(r.content, 'lxml')
try:
table = soup.findAll('table')[2]
rows = table.find_all(['tr'])
for row in rows:
cells = row.find_all('td', limit=5)
if cells:
name = cells[3].text
subdomain_list.append(name)
# Remove duplicate domains from list
subdomain_list = list(set(subdomain_list))
remove_wildcard_list = []
for x in subdomain_list:
if "*." not in x:
remove_wildcard_list.append(x)
else:
print("[!] Detected wildcard domain %s" % x)
print("[!] Removing from subdomain list!")
# Debug: print all subdomains in list
for domain in remove_wildcard_list:
print("[+] Found SSL cert for subdomain: %s" % domain)
return remove_wildcard_list
except:
print("error retriving information")
req_crtsh("digg.com")
|
Add subdomain check using crt.sh
* Removes wildcard
* Gets all ssl certs for possible subdomainsimport urllib
import requests
from bs4 import BeautifulSoup
def req_crtsh(search_string):
subdomain_list = []
base_url = "https://crt.sh/?q=%25." + search_string
print("[+] Requesting URL %s" % base_url)
r = requests.get(url=base_url, headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'})
if r.ok:
soup = BeautifulSoup(r.content, 'lxml')
try:
table = soup.findAll('table')[2]
rows = table.find_all(['tr'])
for row in rows:
cells = row.find_all('td', limit=5)
if cells:
name = cells[3].text
subdomain_list.append(name)
# Remove duplicate domains from list
subdomain_list = list(set(subdomain_list))
remove_wildcard_list = []
for x in subdomain_list:
if "*." not in x:
remove_wildcard_list.append(x)
else:
print("[!] Detected wildcard domain %s" % x)
print("[!] Removing from subdomain list!")
# Debug: print all subdomains in list
for domain in remove_wildcard_list:
print("[+] Found SSL cert for subdomain: %s" % domain)
return remove_wildcard_list
except:
print("error retriving information")
req_crtsh("digg.com")
|
<commit_before><commit_msg>Add subdomain check using crt.sh
* Removes wildcard
* Gets all ssl certs for possible subdomains<commit_after>import urllib
import requests
from bs4 import BeautifulSoup
def req_crtsh(search_string):
subdomain_list = []
base_url = "https://crt.sh/?q=%25." + search_string
print("[+] Requesting URL %s" % base_url)
r = requests.get(url=base_url, headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'})
if r.ok:
soup = BeautifulSoup(r.content, 'lxml')
try:
table = soup.findAll('table')[2]
rows = table.find_all(['tr'])
for row in rows:
cells = row.find_all('td', limit=5)
if cells:
name = cells[3].text
subdomain_list.append(name)
# Remove duplicate domains from list
subdomain_list = list(set(subdomain_list))
remove_wildcard_list = []
for x in subdomain_list:
if "*." not in x:
remove_wildcard_list.append(x)
else:
print("[!] Detected wildcard domain %s" % x)
print("[!] Removing from subdomain list!")
# Debug: print all subdomains in list
for domain in remove_wildcard_list:
print("[+] Found SSL cert for subdomain: %s" % domain)
return remove_wildcard_list
except:
print("error retriving information")
req_crtsh("digg.com")
|
|
c0cebba170b8e3ea5e62e335536cf974bbbf08ec
|
zun/tests/unit/objects/test_fields.py
|
zun/tests/unit/objects/test_fields.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects.tests import test_fields
from zun.objects import fields
class TestContainerStatus(test_fields.TestField):
def setUp(self):
super(TestContainerStatus, self).setUp()
self.field = fields.ContainerStatus()
self.coerce_good_values = [
('Error', 'Error'),
('Running', 'Running'),
('Stopped', 'Stopped'),
('Paused', 'Paused'),
('Unknown', 'Unknown'),
('Creating', 'Creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'Error'",
self.field.stringify('Error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
class TestTaskState(test_fields.TestField):
def setUp(self):
super(TestTaskState, self).setUp()
self.field = fields.TaskState()
self.coerce_good_values = [
('image_pulling', 'image_pulling'),
('container_creating', 'container_creating'),
('sandbox_creating', 'sandbox_creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'image_pulling'",
self.field.stringify('image_pulling'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
|
Add tests for custom object fields
|
Add tests for custom object fields
Change-Id: Ie2534fa1297f5260257bd5fc063af0629115a51f
|
Python
|
apache-2.0
|
kevin-zhaoshuai/zun,kevin-zhaoshuai/zun,kevin-zhaoshuai/zun
|
Add tests for custom object fields
Change-Id: Ie2534fa1297f5260257bd5fc063af0629115a51f
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects.tests import test_fields
from zun.objects import fields
class TestContainerStatus(test_fields.TestField):
def setUp(self):
super(TestContainerStatus, self).setUp()
self.field = fields.ContainerStatus()
self.coerce_good_values = [
('Error', 'Error'),
('Running', 'Running'),
('Stopped', 'Stopped'),
('Paused', 'Paused'),
('Unknown', 'Unknown'),
('Creating', 'Creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'Error'",
self.field.stringify('Error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
class TestTaskState(test_fields.TestField):
def setUp(self):
super(TestTaskState, self).setUp()
self.field = fields.TaskState()
self.coerce_good_values = [
('image_pulling', 'image_pulling'),
('container_creating', 'container_creating'),
('sandbox_creating', 'sandbox_creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'image_pulling'",
self.field.stringify('image_pulling'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
|
<commit_before><commit_msg>Add tests for custom object fields
Change-Id: Ie2534fa1297f5260257bd5fc063af0629115a51f<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects.tests import test_fields
from zun.objects import fields
class TestContainerStatus(test_fields.TestField):
def setUp(self):
super(TestContainerStatus, self).setUp()
self.field = fields.ContainerStatus()
self.coerce_good_values = [
('Error', 'Error'),
('Running', 'Running'),
('Stopped', 'Stopped'),
('Paused', 'Paused'),
('Unknown', 'Unknown'),
('Creating', 'Creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'Error'",
self.field.stringify('Error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
class TestTaskState(test_fields.TestField):
def setUp(self):
super(TestTaskState, self).setUp()
self.field = fields.TaskState()
self.coerce_good_values = [
('image_pulling', 'image_pulling'),
('container_creating', 'container_creating'),
('sandbox_creating', 'sandbox_creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'image_pulling'",
self.field.stringify('image_pulling'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
|
Add tests for custom object fields
Change-Id: Ie2534fa1297f5260257bd5fc063af0629115a51f# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects.tests import test_fields
from zun.objects import fields
class TestContainerStatus(test_fields.TestField):
def setUp(self):
super(TestContainerStatus, self).setUp()
self.field = fields.ContainerStatus()
self.coerce_good_values = [
('Error', 'Error'),
('Running', 'Running'),
('Stopped', 'Stopped'),
('Paused', 'Paused'),
('Unknown', 'Unknown'),
('Creating', 'Creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'Error'",
self.field.stringify('Error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
class TestTaskState(test_fields.TestField):
def setUp(self):
super(TestTaskState, self).setUp()
self.field = fields.TaskState()
self.coerce_good_values = [
('image_pulling', 'image_pulling'),
('container_creating', 'container_creating'),
('sandbox_creating', 'sandbox_creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'image_pulling'",
self.field.stringify('image_pulling'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
|
<commit_before><commit_msg>Add tests for custom object fields
Change-Id: Ie2534fa1297f5260257bd5fc063af0629115a51f<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects.tests import test_fields
from zun.objects import fields
class TestContainerStatus(test_fields.TestField):
def setUp(self):
super(TestContainerStatus, self).setUp()
self.field = fields.ContainerStatus()
self.coerce_good_values = [
('Error', 'Error'),
('Running', 'Running'),
('Stopped', 'Stopped'),
('Paused', 'Paused'),
('Unknown', 'Unknown'),
('Creating', 'Creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'Error'",
self.field.stringify('Error'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
class TestTaskState(test_fields.TestField):
def setUp(self):
super(TestTaskState, self).setUp()
self.field = fields.TaskState()
self.coerce_good_values = [
('image_pulling', 'image_pulling'),
('container_creating', 'container_creating'),
('sandbox_creating', 'sandbox_creating'),
]
self.coerce_bad_values = ['bad_value']
self.to_primitive_values = self.coerce_good_values[0:1]
self.from_primitive_values = self.coerce_good_values[0:1]
def test_stringify(self):
self.assertEqual("'image_pulling'",
self.field.stringify('image_pulling'))
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'bad_value')
|
|
0cccff9b6a72b61a1b6cdfeb3ed5be525e74b822
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": [
"pep8",
"pylint",
"pytest",
],
},
tests_require=[
"pytest",
],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
|
#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": ["pytest"],
},
tests_require=["pytest"],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
|
Remove old and uneeded test dependencies
|
Remove old and uneeded test dependencies
|
Python
|
apache-2.0
|
dstufft/pynacl,xueyumusic/pynacl,dstufft/pynacl,pyca/pynacl,lmctv/pynacl,scholarly/pynacl,dstufft/pynacl,alex/pynacl,Insoleet/cutecoin,xueyumusic/pynacl,JackWink/pynacl,reaperhulk/pynacl,lmctv/pynacl,scholarly/pynacl,xueyumusic/pynacl,hoffmabc/pynacl,JackWink/pynacl,ucoin-io/cutecoin,lmctv/pynacl,JackWink/pynacl,xueyumusic/pynacl,reaperhulk/pynacl,hoffmabc/pynacl,pyca/pynacl,lmctv/pynacl,scholarly/pynacl,ucoin-io/cutecoin,JackWink/pynacl,reaperhulk/pynacl,ucoin-bot/cutecoin,scholarly/pynacl,pyca/pynacl,alex/pynacl,alex/pynacl,hoffmabc/pynacl,pyca/pynacl,ucoin-io/cutecoin,lmctv/pynacl,reaperhulk/pynacl,alex/pynacl,dstufft/pynacl,pyca/pynacl,reaperhulk/pynacl
|
#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": [
"pep8",
"pylint",
"pytest",
],
},
tests_require=[
"pytest",
],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
Remove old and uneeded test dependencies
|
#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": ["pytest"],
},
tests_require=["pytest"],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": [
"pep8",
"pylint",
"pytest",
],
},
tests_require=[
"pytest",
],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
<commit_msg>Remove old and uneeded test dependencies<commit_after>
|
#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": ["pytest"],
},
tests_require=["pytest"],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
|
#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": [
"pep8",
"pylint",
"pytest",
],
},
tests_require=[
"pytest",
],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
Remove old and uneeded test dependencies#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": ["pytest"],
},
tests_require=["pytest"],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": [
"pep8",
"pylint",
"pytest",
],
},
tests_require=[
"pytest",
],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
<commit_msg>Remove old and uneeded test dependencies<commit_after>#!/usr/bin/env python
from setuptools import setup
__about__ = {}
with open("nacl/__about__.py") as fp:
exec(fp.read(), None, __about__)
try:
import nacl.nacl
except ImportError:
# installing - there is no cffi yet
ext_modules = []
else:
# building bdist - cffi is here!
ext_modules = [nacl.nacl.ffi.verifier.get_extension()]
setup(
name=__about__["__title__"],
version=__about__["__version__"],
description=__about__["__summary__"],
long_description=open("README.rst").read(),
url=__about__["__uri__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
install_requires=[
"cffi",
],
extras_require={
"tests": ["pytest"],
},
tests_require=["pytest"],
packages=[
"nacl",
],
ext_package="nacl",
ext_modules=ext_modules,
zip_safe=False,
)
|
ae283eb36026dfe092c6f69d310055f8e373d953
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0', 'simplejson']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
Add simplejson as a required module
|
Add simplejson as a required module
https://mediawiki.org/wiki/Special:Code/pywikipedia/11666
|
Python
|
mit
|
smalyshev/pywikibot-core,xZise/pywikibot-core,TridevGuha/pywikibot-core,wikimedia/pywikibot-core,happy5214/pywikibot-core,wikimedia/pywikibot-core,hasteur/g13bot_tools_new,h4ck3rm1k3/pywikibot-core,magul/pywikibot-core,hasteur/g13bot_tools_new,trishnaguha/pywikibot-core,h4ck3rm1k3/pywikibot-core,npdoty/pywikibot,magul/pywikibot-core,VcamX/pywikibot-core,Darkdadaah/pywikibot-core,Darkdadaah/pywikibot-core,darthbhyrava/pywikibot-local,icyflame/batman,PersianWikipedia/pywikibot-core,happy5214/pywikibot-core,jayvdb/pywikibot-core,hasteur/g13bot_tools_new,emijrp/pywikibot-core,npdoty/pywikibot,jayvdb/pywikibot-core,valhallasw/pywikibot-core
|
# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
Add simplejson as a required module
https://mediawiki.org/wiki/Special:Code/pywikipedia/11666
|
# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0', 'simplejson']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
<commit_before># -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
<commit_msg>Add simplejson as a required module
https://mediawiki.org/wiki/Special:Code/pywikipedia/11666<commit_after>
|
# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0', 'simplejson']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
Add simplejson as a required module
https://mediawiki.org/wiki/Special:Code/pywikipedia/11666# -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0', 'simplejson']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
<commit_before># -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
<commit_msg>Add simplejson as a required module
https://mediawiki.org/wiki/Special:Code/pywikipedia/11666<commit_after># -*- coding: utf-8 -*-
"""installer script for pywikibot 2.0 framework"""
#
# (C) Pywikipedia team, 2009-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import sys
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
if sys.version_info[0] != 2:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2")
if sys.version_info[1] < 6:
raise RuntimeError("ERROR: Pywikipediabot only runs under Python 2.6 or higher")
else:
depend = ['httplib2>=0.6.0', 'simplejson']
setup(name='Pywikipediabot',
version='2.0alpha',
description='Python Wikipedia Bot Framework',
license='MIT',
packages=find_packages(),
install_requires=depend,
test_suite="tests",
)
# automatically launch generate_user_files.py
import subprocess
python = sys.executable
python = python.replace("pythonw.exe", "python.exe") # for Windows
ignore = subprocess.call([python, "generate_user_files.py"])
|
0ea6f45792a3e1ce321f774a8582171fe4d39ad0
|
demo/python-overview/intro.py
|
demo/python-overview/intro.py
|
# Hello world
print('Hello, world!')
# Indentation is important in Python!
x = 1
if x == 1:
print('x is 1')
# Numbers and Strings
my_int = 8
print(my_int)
my_float = 1.4
print(my_float)
a, b, c = 1, 2, 3
print(a, b, c)
my_string = 'hello'
print(my_string)
my_string = "hello"
print(my_string)
# Operators
one = 1
two = 2
three = one + two
print(one < two)
print(one > two)
print(two > one)
print(three == 3)
helloworld = 'hello' + ' ' + 'world'
print(helloworld)
print(one + helloworld)
# Lists
mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist)
print(mylist[0])
print(mylist[1])
print(mylist[2])
print(mylist[3]) # IndexError
print(len(mylist))
print(0 in mylist)
print(4 not in mylist)
x, y, z = mylist
print(x)
print(y)
print(z)
# Looping over lists
for item in mylist:
print(item)
mylist = [1, "Alice", True, ['Bill']]
# More Operators
print(7 + 3.0) # addition
print(2 ** 3) # exponents
print(3 - 4) # subtraction
print(3 / 2) # division
print(3 // 2) # integer division
print(14 % 12) # remainder. like clock arithmetic
# Operators on Strings
print('hello' + ' ' + 'world') # concatenation
print('hello'*5) # repeat
# Operators on Lists
print([1, 2, 3] + [7, 8, 9])
print([1, 2, 3] * 3)
# Formatting Strings
answer = 42
print(f'the answer is {answer}')
# Dictionary
ages = {'alice': 23, 'bob': 12, 'frank': 66}
# Functions
def square(x):
return x ** 2
print(square(2))
def say_hello():
print('hello')
x = say_hello()
print(x)
def square_multiple(a, b, c):
return a ** 2, b ** 2, c ** 2
result = square_multiple(1, 2, 3)
print(result)
x, y, z = result
# Looping
x = 10
while x > 0:
print(x)
x -= 1
|
Add basic python for notes
|
Add basic python for notes
|
Python
|
mit
|
HackBinghamton/club,HackBinghamton/club,HackBinghamton/club
|
Add basic python for notes
|
# Hello world
print('Hello, world!')
# Indentation is important in Python!
x = 1
if x == 1:
print('x is 1')
# Numbers and Strings
my_int = 8
print(my_int)
my_float = 1.4
print(my_float)
a, b, c = 1, 2, 3
print(a, b, c)
my_string = 'hello'
print(my_string)
my_string = "hello"
print(my_string)
# Operators
one = 1
two = 2
three = one + two
print(one < two)
print(one > two)
print(two > one)
print(three == 3)
helloworld = 'hello' + ' ' + 'world'
print(helloworld)
print(one + helloworld)
# Lists
mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist)
print(mylist[0])
print(mylist[1])
print(mylist[2])
print(mylist[3]) # IndexError
print(len(mylist))
print(0 in mylist)
print(4 not in mylist)
x, y, z = mylist
print(x)
print(y)
print(z)
# Looping over lists
for item in mylist:
print(item)
mylist = [1, "Alice", True, ['Bill']]
# More Operators
print(7 + 3.0) # addition
print(2 ** 3) # exponents
print(3 - 4) # subtraction
print(3 / 2) # division
print(3 // 2) # integer division
print(14 % 12) # remainder. like clock arithmetic
# Operators on Strings
print('hello' + ' ' + 'world') # concatenation
print('hello'*5) # repeat
# Operators on Lists
print([1, 2, 3] + [7, 8, 9])
print([1, 2, 3] * 3)
# Formatting Strings
answer = 42
print(f'the answer is {answer}')
# Dictionary
ages = {'alice': 23, 'bob': 12, 'frank': 66}
# Functions
def square(x):
return x ** 2
print(square(2))
def say_hello():
print('hello')
x = say_hello()
print(x)
def square_multiple(a, b, c):
return a ** 2, b ** 2, c ** 2
result = square_multiple(1, 2, 3)
print(result)
x, y, z = result
# Looping
x = 10
while x > 0:
print(x)
x -= 1
|
<commit_before><commit_msg>Add basic python for notes<commit_after>
|
# Hello world
print('Hello, world!')
# Indentation is important in Python!
x = 1
if x == 1:
print('x is 1')
# Numbers and Strings
my_int = 8
print(my_int)
my_float = 1.4
print(my_float)
a, b, c = 1, 2, 3
print(a, b, c)
my_string = 'hello'
print(my_string)
my_string = "hello"
print(my_string)
# Operators
one = 1
two = 2
three = one + two
print(one < two)
print(one > two)
print(two > one)
print(three == 3)
helloworld = 'hello' + ' ' + 'world'
print(helloworld)
print(one + helloworld)
# Lists
mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist)
print(mylist[0])
print(mylist[1])
print(mylist[2])
print(mylist[3]) # IndexError
print(len(mylist))
print(0 in mylist)
print(4 not in mylist)
x, y, z = mylist
print(x)
print(y)
print(z)
# Looping over lists
for item in mylist:
print(item)
mylist = [1, "Alice", True, ['Bill']]
# More Operators
print(7 + 3.0) # addition
print(2 ** 3) # exponents
print(3 - 4) # subtraction
print(3 / 2) # division
print(3 // 2) # integer division
print(14 % 12) # remainder. like clock arithmetic
# Operators on Strings
print('hello' + ' ' + 'world') # concatenation
print('hello'*5) # repeat
# Operators on Lists
print([1, 2, 3] + [7, 8, 9])
print([1, 2, 3] * 3)
# Formatting Strings
answer = 42
print(f'the answer is {answer}')
# Dictionary
ages = {'alice': 23, 'bob': 12, 'frank': 66}
# Functions
def square(x):
return x ** 2
print(square(2))
def say_hello():
print('hello')
x = say_hello()
print(x)
def square_multiple(a, b, c):
return a ** 2, b ** 2, c ** 2
result = square_multiple(1, 2, 3)
print(result)
x, y, z = result
# Looping
x = 10
while x > 0:
print(x)
x -= 1
|
Add basic python for notes# Hello world
print('Hello, world!')
# Indentation is important in Python!
x = 1
if x == 1:
print('x is 1')
# Numbers and Strings
my_int = 8
print(my_int)
my_float = 1.4
print(my_float)
a, b, c = 1, 2, 3
print(a, b, c)
my_string = 'hello'
print(my_string)
my_string = "hello"
print(my_string)
# Operators
one = 1
two = 2
three = one + two
print(one < two)
print(one > two)
print(two > one)
print(three == 3)
helloworld = 'hello' + ' ' + 'world'
print(helloworld)
print(one + helloworld)
# Lists
mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist)
print(mylist[0])
print(mylist[1])
print(mylist[2])
print(mylist[3]) # IndexError
print(len(mylist))
print(0 in mylist)
print(4 not in mylist)
x, y, z = mylist
print(x)
print(y)
print(z)
# Looping over lists
for item in mylist:
print(item)
mylist = [1, "Alice", True, ['Bill']]
# More Operators
print(7 + 3.0) # addition
print(2 ** 3) # exponents
print(3 - 4) # subtraction
print(3 / 2) # division
print(3 // 2) # integer division
print(14 % 12) # remainder. like clock arithmetic
# Operators on Strings
print('hello' + ' ' + 'world') # concatenation
print('hello'*5) # repeat
# Operators on Lists
print([1, 2, 3] + [7, 8, 9])
print([1, 2, 3] * 3)
# Formatting Strings
answer = 42
print(f'the answer is {answer}')
# Dictionary
ages = {'alice': 23, 'bob': 12, 'frank': 66}
# Functions
def square(x):
return x ** 2
print(square(2))
def say_hello():
print('hello')
x = say_hello()
print(x)
def square_multiple(a, b, c):
return a ** 2, b ** 2, c ** 2
result = square_multiple(1, 2, 3)
print(result)
x, y, z = result
# Looping
x = 10
while x > 0:
print(x)
x -= 1
|
<commit_before><commit_msg>Add basic python for notes<commit_after># Hello world
print('Hello, world!')
# Indentation is important in Python!
x = 1
if x == 1:
print('x is 1')
# Numbers and Strings
my_int = 8
print(my_int)
my_float = 1.4
print(my_float)
a, b, c = 1, 2, 3
print(a, b, c)
my_string = 'hello'
print(my_string)
my_string = "hello"
print(my_string)
# Operators
one = 1
two = 2
three = one + two
print(one < two)
print(one > two)
print(two > one)
print(three == 3)
helloworld = 'hello' + ' ' + 'world'
print(helloworld)
print(one + helloworld)
# Lists
mylist = []
mylist.append(1)
mylist.append(2)
mylist.append(3)
print(mylist)
print(mylist[0])
print(mylist[1])
print(mylist[2])
print(mylist[3]) # IndexError
print(len(mylist))
print(0 in mylist)
print(4 not in mylist)
x, y, z = mylist
print(x)
print(y)
print(z)
# Looping over lists
for item in mylist:
print(item)
mylist = [1, "Alice", True, ['Bill']]
# More Operators
print(7 + 3.0) # addition
print(2 ** 3) # exponents
print(3 - 4) # subtraction
print(3 / 2) # division
print(3 // 2) # integer division
print(14 % 12) # remainder. like clock arithmetic
# Operators on Strings
print('hello' + ' ' + 'world') # concatenation
print('hello'*5) # repeat
# Operators on Lists
print([1, 2, 3] + [7, 8, 9])
print([1, 2, 3] * 3)
# Formatting Strings
answer = 42
print(f'the answer is {answer}')
# Dictionary
ages = {'alice': 23, 'bob': 12, 'frank': 66}
# Functions
def square(x):
return x ** 2
print(square(2))
def say_hello():
print('hello')
x = say_hello()
print(x)
def square_multiple(a, b, c):
return a ** 2, b ** 2, c ** 2
result = square_multiple(1, 2, 3)
print(result)
x, y, z = result
# Looping
x = 10
while x > 0:
print(x)
x -= 1
|
|
0241bbc9b370545d7d1270fa391fd9347b9c39a6
|
test/os_linux_ubuntu.py
|
test/os_linux_ubuntu.py
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_ubuntu import MbedLsToolsUbuntu
class UbuntuPortTestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.linux_ubuntu = MbedLsToolsUbuntu()
def tearDown(self):
pass
def test_os_support(self):
self.assertIn("Ubuntu", self.linux_ubuntu.os_supported)
if __name__ == '__main__':
unittest.main()
|
Add basic unit tests for Ubuntu specific platform
|
Add basic unit tests for Ubuntu specific platform
|
Python
|
apache-2.0
|
mtmtech/mbed-ls,jupe/mbed-ls,mazimkhan/mbed-ls,jjones646/mbed-ls,jupe/mbed-ls,jjones646/mbed-ls,mtmtech/mbed-ls,mazimkhan/mbed-ls
|
Add basic unit tests for Ubuntu specific platform
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_ubuntu import MbedLsToolsUbuntu
class UbuntuPortTestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.linux_ubuntu = MbedLsToolsUbuntu()
def tearDown(self):
pass
def test_os_support(self):
self.assertIn("Ubuntu", self.linux_ubuntu.os_supported)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic unit tests for Ubuntu specific platform<commit_after>
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_ubuntu import MbedLsToolsUbuntu
class UbuntuPortTestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.linux_ubuntu = MbedLsToolsUbuntu()
def tearDown(self):
pass
def test_os_support(self):
self.assertIn("Ubuntu", self.linux_ubuntu.os_supported)
if __name__ == '__main__':
unittest.main()
|
Add basic unit tests for Ubuntu specific platform#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_ubuntu import MbedLsToolsUbuntu
class UbuntuPortTestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.linux_ubuntu = MbedLsToolsUbuntu()
def tearDown(self):
pass
def test_os_support(self):
self.assertIn("Ubuntu", self.linux_ubuntu.os_supported)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic unit tests for Ubuntu specific platform<commit_after>#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_ubuntu import MbedLsToolsUbuntu
class UbuntuPortTestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
self.linux_ubuntu = MbedLsToolsUbuntu()
def tearDown(self):
pass
def test_os_support(self):
self.assertIn("Ubuntu", self.linux_ubuntu.os_supported)
if __name__ == '__main__':
unittest.main()
|
|
b2479b41672b0fafaabd42dc3dde56328f77ab0b
|
tests/test_addresses.py
|
tests/test_addresses.py
|
import json
import random
import pytest
from zephyrus.addresses import Participants
from zephyrus.exceptions import ImproperlyConfigured
@pytest.fixture(scope='session')
def config_json():
return {
"simulation": "tcp://127.0.0.1:7000",
"strategy": "tcp://127.0.0.1:5000",
"tester": "tcp://127.0.0.1:6600",
"tester_par": "tcp://127.0.0.1:6601",
"tester_est": "tcp://127.0.0.1:6605",
"monitor": "tcp://127.0.0.1:6500",
"environment": "tcp://127.0.0.1:6000",
"agent": "tcp://127.0.0.1:6001"
}
@pytest.fixture(scope='session')
def address_configuration_file(tmpdir_factory, config_json):
filename = tmpdir_factory.mktemp('conf').join('addresses.json')
with open(str(filename), 'w') as output:
json.dump(config_json, output)
return filename
def test_participants_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
assert participants.alias(address) == alias
assert participants.address(alias) == address
def test_participants_invalid_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
with pytest.raises(KeyError):
participants.alias(address + address)
with pytest.raises(KeyError):
participants.address(alias + alias)
def test_participants_invalid_config_file(tmpdir):
p = tmpdir.mkdir("foo").join("fakeconfig.json")
p.write("[invalid gibberish]")
with pytest.raises(ImproperlyConfigured):
Participants(str(p))
|
Add tests to addresses module
|
Add tests to addresses module
|
Python
|
mit
|
wairton/zephyrus-mas
|
Add tests to addresses module
|
import json
import random
import pytest
from zephyrus.addresses import Participants
from zephyrus.exceptions import ImproperlyConfigured
@pytest.fixture(scope='session')
def config_json():
return {
"simulation": "tcp://127.0.0.1:7000",
"strategy": "tcp://127.0.0.1:5000",
"tester": "tcp://127.0.0.1:6600",
"tester_par": "tcp://127.0.0.1:6601",
"tester_est": "tcp://127.0.0.1:6605",
"monitor": "tcp://127.0.0.1:6500",
"environment": "tcp://127.0.0.1:6000",
"agent": "tcp://127.0.0.1:6001"
}
@pytest.fixture(scope='session')
def address_configuration_file(tmpdir_factory, config_json):
filename = tmpdir_factory.mktemp('conf').join('addresses.json')
with open(str(filename), 'w') as output:
json.dump(config_json, output)
return filename
def test_participants_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
assert participants.alias(address) == alias
assert participants.address(alias) == address
def test_participants_invalid_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
with pytest.raises(KeyError):
participants.alias(address + address)
with pytest.raises(KeyError):
participants.address(alias + alias)
def test_participants_invalid_config_file(tmpdir):
p = tmpdir.mkdir("foo").join("fakeconfig.json")
p.write("[invalid gibberish]")
with pytest.raises(ImproperlyConfigured):
Participants(str(p))
|
<commit_before><commit_msg>Add tests to addresses module<commit_after>
|
import json
import random
import pytest
from zephyrus.addresses import Participants
from zephyrus.exceptions import ImproperlyConfigured
@pytest.fixture(scope='session')
def config_json():
return {
"simulation": "tcp://127.0.0.1:7000",
"strategy": "tcp://127.0.0.1:5000",
"tester": "tcp://127.0.0.1:6600",
"tester_par": "tcp://127.0.0.1:6601",
"tester_est": "tcp://127.0.0.1:6605",
"monitor": "tcp://127.0.0.1:6500",
"environment": "tcp://127.0.0.1:6000",
"agent": "tcp://127.0.0.1:6001"
}
@pytest.fixture(scope='session')
def address_configuration_file(tmpdir_factory, config_json):
filename = tmpdir_factory.mktemp('conf').join('addresses.json')
with open(str(filename), 'w') as output:
json.dump(config_json, output)
return filename
def test_participants_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
assert participants.alias(address) == alias
assert participants.address(alias) == address
def test_participants_invalid_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
with pytest.raises(KeyError):
participants.alias(address + address)
with pytest.raises(KeyError):
participants.address(alias + alias)
def test_participants_invalid_config_file(tmpdir):
p = tmpdir.mkdir("foo").join("fakeconfig.json")
p.write("[invalid gibberish]")
with pytest.raises(ImproperlyConfigured):
Participants(str(p))
|
Add tests to addresses moduleimport json
import random
import pytest
from zephyrus.addresses import Participants
from zephyrus.exceptions import ImproperlyConfigured
@pytest.fixture(scope='session')
def config_json():
return {
"simulation": "tcp://127.0.0.1:7000",
"strategy": "tcp://127.0.0.1:5000",
"tester": "tcp://127.0.0.1:6600",
"tester_par": "tcp://127.0.0.1:6601",
"tester_est": "tcp://127.0.0.1:6605",
"monitor": "tcp://127.0.0.1:6500",
"environment": "tcp://127.0.0.1:6000",
"agent": "tcp://127.0.0.1:6001"
}
@pytest.fixture(scope='session')
def address_configuration_file(tmpdir_factory, config_json):
filename = tmpdir_factory.mktemp('conf').join('addresses.json')
with open(str(filename), 'w') as output:
json.dump(config_json, output)
return filename
def test_participants_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
assert participants.alias(address) == alias
assert participants.address(alias) == address
def test_participants_invalid_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
with pytest.raises(KeyError):
participants.alias(address + address)
with pytest.raises(KeyError):
participants.address(alias + alias)
def test_participants_invalid_config_file(tmpdir):
p = tmpdir.mkdir("foo").join("fakeconfig.json")
p.write("[invalid gibberish]")
with pytest.raises(ImproperlyConfigured):
Participants(str(p))
|
<commit_before><commit_msg>Add tests to addresses module<commit_after>import json
import random
import pytest
from zephyrus.addresses import Participants
from zephyrus.exceptions import ImproperlyConfigured
@pytest.fixture(scope='session')
def config_json():
return {
"simulation": "tcp://127.0.0.1:7000",
"strategy": "tcp://127.0.0.1:5000",
"tester": "tcp://127.0.0.1:6600",
"tester_par": "tcp://127.0.0.1:6601",
"tester_est": "tcp://127.0.0.1:6605",
"monitor": "tcp://127.0.0.1:6500",
"environment": "tcp://127.0.0.1:6000",
"agent": "tcp://127.0.0.1:6001"
}
@pytest.fixture(scope='session')
def address_configuration_file(tmpdir_factory, config_json):
filename = tmpdir_factory.mktemp('conf').join('addresses.json')
with open(str(filename), 'w') as output:
json.dump(config_json, output)
return filename
def test_participants_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
assert participants.alias(address) == alias
assert participants.address(alias) == address
def test_participants_invalid_alias_and_address(address_configuration_file, config_json):
participants = Participants(str(address_configuration_file))
alias, address = random.choice(list(config_json.items()))
with pytest.raises(KeyError):
participants.alias(address + address)
with pytest.raises(KeyError):
participants.address(alias + alias)
def test_participants_invalid_config_file(tmpdir):
p = tmpdir.mkdir("foo").join("fakeconfig.json")
p.write("[invalid gibberish]")
with pytest.raises(ImproperlyConfigured):
Participants(str(p))
|
|
f290736a1b5c8dd4616462d97fdf7809d78604ad
|
tests/test_lotka_volterra.py
|
tests/test_lotka_volterra.py
|
#! /usr/bin/env python
from nose.tools import assert_equal, assert_almost_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
from helper import table_exist, find_ids, exit_times
def test_minimal_cycle():
"""Tests simulations with Preys and Predators
Preys offer a resource representing itself. Predators acquire the resources
of the preys. When the prey's resource gets accepted, it decommissions
itself. Preys and predators have a fixed life expectancy. However, if
a predator does not get any resource for several time steps,
it must decommission itself as well.
After certain time steps, predators and preys reproduce and deploy new
predators and preys respectively.
Oscillating behavior is expected.
"""
# A reference simulation input for Lotka-Volterra simulation
sim_input = "./Inputs/lotka_volterra.xml"
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
yield check_cmd, cmd, '.', holdsrtn
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/AgentEntry", "/Resources", "/Transactions", "/Info"]
# Check if these tables exist
yield assert_true, table_exist(output, paths)
if not table_exist(output, paths):
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
return # don't execute further commands
# Get specific tables and columns
agent_entry = output.get_node("/AgentEntry")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
# Find agent ids
agent_ids = agent_entry["AgentId"]
agent_impl = agent_entry["Implementation"]
agent_protos = agent_entry["Prototype"]
duration = info["Duration"][0]
# Track transacted resources
quantities = resources["Quantity"]
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
|
Add a test file for Lotka-Volterra simulation.
|
Add a test file for Lotka-Volterra simulation.
|
Python
|
bsd-3-clause
|
Baaaaam/cyclus,gidden/cyclus,Baaaaam/cyclus,gidden/cyclus,hodger/cyclus,hodger/cyclus,mbmcgarry/cyclus,gidden/cyclus,hodger/cyclus,Baaaaam/cyclus,mbmcgarry/cyclus,gidden/cyclus,mbmcgarry/cyclus,hodger/cyclus,hodger/cyclus,rwcarlsen/cyclus,rwcarlsen/cyclus,mbmcgarry/cyclus,rwcarlsen/cyclus,rwcarlsen/cyclus
|
Add a test file for Lotka-Volterra simulation.
|
#! /usr/bin/env python
from nose.tools import assert_equal, assert_almost_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
from helper import table_exist, find_ids, exit_times
def test_minimal_cycle():
"""Tests simulations with Preys and Predators
Preys offer a resource representing itself. Predators acquire the resources
of the preys. When the prey's resource gets accepted, it decommissions
itself. Preys and predators have a fixed life expectancy. However, if
a predator does not get any resource for several time steps,
it must decommission itself as well.
After certain time steps, predators and preys reproduce and deploy new
predators and preys respectively.
Oscillating behavior is expected.
"""
# A reference simulation input for Lotka-Volterra simulation
sim_input = "./Inputs/lotka_volterra.xml"
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
yield check_cmd, cmd, '.', holdsrtn
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/AgentEntry", "/Resources", "/Transactions", "/Info"]
# Check if these tables exist
yield assert_true, table_exist(output, paths)
if not table_exist(output, paths):
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
return # don't execute further commands
# Get specific tables and columns
agent_entry = output.get_node("/AgentEntry")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
# Find agent ids
agent_ids = agent_entry["AgentId"]
agent_impl = agent_entry["Implementation"]
agent_protos = agent_entry["Prototype"]
duration = info["Duration"][0]
# Track transacted resources
quantities = resources["Quantity"]
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
|
<commit_before><commit_msg>Add a test file for Lotka-Volterra simulation.<commit_after>
|
#! /usr/bin/env python
from nose.tools import assert_equal, assert_almost_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
from helper import table_exist, find_ids, exit_times
def test_minimal_cycle():
"""Tests simulations with Preys and Predators
Preys offer a resource representing itself. Predators acquire the resources
of the preys. When the prey's resource gets accepted, it decommissions
itself. Preys and predators have a fixed life expectancy. However, if
a predator does not get any resource for several time steps,
it must decommission itself as well.
After certain time steps, predators and preys reproduce and deploy new
predators and preys respectively.
Oscillating behavior is expected.
"""
# A reference simulation input for Lotka-Volterra simulation
sim_input = "./Inputs/lotka_volterra.xml"
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
yield check_cmd, cmd, '.', holdsrtn
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/AgentEntry", "/Resources", "/Transactions", "/Info"]
# Check if these tables exist
yield assert_true, table_exist(output, paths)
if not table_exist(output, paths):
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
return # don't execute further commands
# Get specific tables and columns
agent_entry = output.get_node("/AgentEntry")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
# Find agent ids
agent_ids = agent_entry["AgentId"]
agent_impl = agent_entry["Implementation"]
agent_protos = agent_entry["Prototype"]
duration = info["Duration"][0]
# Track transacted resources
quantities = resources["Quantity"]
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
|
Add a test file for Lotka-Volterra simulation.#! /usr/bin/env python
from nose.tools import assert_equal, assert_almost_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
from helper import table_exist, find_ids, exit_times
def test_minimal_cycle():
"""Tests simulations with Preys and Predators
Preys offer a resource representing itself. Predators acquire the resources
of the preys. When the prey's resource gets accepted, it decommissions
itself. Preys and predators have a fixed life expectancy. However, if
a predator does not get any resource for several time steps,
it must decommission itself as well.
After certain time steps, predators and preys reproduce and deploy new
predators and preys respectively.
Oscillating behavior is expected.
"""
# A reference simulation input for Lotka-Volterra simulation
sim_input = "./Inputs/lotka_volterra.xml"
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
yield check_cmd, cmd, '.', holdsrtn
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/AgentEntry", "/Resources", "/Transactions", "/Info"]
# Check if these tables exist
yield assert_true, table_exist(output, paths)
if not table_exist(output, paths):
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
return # don't execute further commands
# Get specific tables and columns
agent_entry = output.get_node("/AgentEntry")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
# Find agent ids
agent_ids = agent_entry["AgentId"]
agent_impl = agent_entry["Implementation"]
agent_protos = agent_entry["Prototype"]
duration = info["Duration"][0]
# Track transacted resources
quantities = resources["Quantity"]
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
|
<commit_before><commit_msg>Add a test file for Lotka-Volterra simulation.<commit_after>#! /usr/bin/env python
from nose.tools import assert_equal, assert_almost_equal, assert_true
from numpy.testing import assert_array_equal
import os
import tables
import numpy as np
from tools import check_cmd
from helper import table_exist, find_ids, exit_times
def test_minimal_cycle():
"""Tests simulations with Preys and Predators
Preys offer a resource representing itself. Predators acquire the resources
of the preys. When the prey's resource gets accepted, it decommissions
itself. Preys and predators have a fixed life expectancy. However, if
a predator does not get any resource for several time steps,
it must decommission itself as well.
After certain time steps, predators and preys reproduce and deploy new
predators and preys respectively.
Oscillating behavior is expected.
"""
# A reference simulation input for Lotka-Volterra simulation
sim_input = "./Inputs/lotka_volterra.xml"
holdsrtn = [1] # needed because nose does not send() to test generator
cmd = ["cyclus", "-o", "./output_temp.h5", "--input-file", sim_input]
yield check_cmd, cmd, '.', holdsrtn
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
output = tables.open_file("./output_temp.h5", mode = "r")
# tables of interest
paths = ["/AgentEntry", "/Resources", "/Transactions", "/Info"]
# Check if these tables exist
yield assert_true, table_exist(output, paths)
if not table_exist(output, paths):
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
return # don't execute further commands
# Get specific tables and columns
agent_entry = output.get_node("/AgentEntry")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
# Find agent ids
agent_ids = agent_entry["AgentId"]
agent_impl = agent_entry["Implementation"]
agent_protos = agent_entry["Prototype"]
duration = info["Duration"][0]
# Track transacted resources
quantities = resources["Quantity"]
output.close()
os.remove("./output_temp.h5")
# This is a starter sqlite db created implicitly
os.remove("./output_temp.sqlite")
|
|
cc9f7d0715d104868684d9213c1ca84f7b75aadb
|
tests/fileformats/test_linereader.py
|
tests/fileformats/test_linereader.py
|
import glob
from mrs.fileformats import LineReader
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
PY3 = sys.version_info[0] == 3
def test_dickens():
with open('tests/data/dickens/split1.txt', 'rb') as f:
reader = LineReader(f)
lines = list(reader)
assert len(lines) == 2
key, value = lines[0]
assert key == 0
assert value == 'It was the best of times,\n'
key, value = lines[1]
assert key == 1
assert value == 'it was the worst of times,\n'
def test_bytesio():
orig_lines = ['line 1\n', 'line 2\n', '3\tline\n']
if PY3:
orig_lines.append('line ∞\n')
else:
orig_lines = [unicode(s) for s in orig_lines]
orig_lines.append('line \xe2\x88\x9e\n'.decode('utf-8'))
data = ''.join(orig_lines).encode('utf-8')
f = BytesIO(data)
reader = LineReader(f)
lines = list(reader)
assert lines == list(enumerate(orig_lines))
# vim: et sw=4 sts=4
|
Add some tests for the LineReader class
|
Add some tests for the LineReader class
|
Python
|
apache-2.0
|
kseppi/mrs-mapreduce,byu-aml-lab/mrs-mapreduce
|
Add some tests for the LineReader class
|
import glob
from mrs.fileformats import LineReader
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
PY3 = sys.version_info[0] == 3
def test_dickens():
with open('tests/data/dickens/split1.txt', 'rb') as f:
reader = LineReader(f)
lines = list(reader)
assert len(lines) == 2
key, value = lines[0]
assert key == 0
assert value == 'It was the best of times,\n'
key, value = lines[1]
assert key == 1
assert value == 'it was the worst of times,\n'
def test_bytesio():
orig_lines = ['line 1\n', 'line 2\n', '3\tline\n']
if PY3:
orig_lines.append('line ∞\n')
else:
orig_lines = [unicode(s) for s in orig_lines]
orig_lines.append('line \xe2\x88\x9e\n'.decode('utf-8'))
data = ''.join(orig_lines).encode('utf-8')
f = BytesIO(data)
reader = LineReader(f)
lines = list(reader)
assert lines == list(enumerate(orig_lines))
# vim: et sw=4 sts=4
|
<commit_before><commit_msg>Add some tests for the LineReader class<commit_after>
|
import glob
from mrs.fileformats import LineReader
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
PY3 = sys.version_info[0] == 3
def test_dickens():
with open('tests/data/dickens/split1.txt', 'rb') as f:
reader = LineReader(f)
lines = list(reader)
assert len(lines) == 2
key, value = lines[0]
assert key == 0
assert value == 'It was the best of times,\n'
key, value = lines[1]
assert key == 1
assert value == 'it was the worst of times,\n'
def test_bytesio():
orig_lines = ['line 1\n', 'line 2\n', '3\tline\n']
if PY3:
orig_lines.append('line ∞\n')
else:
orig_lines = [unicode(s) for s in orig_lines]
orig_lines.append('line \xe2\x88\x9e\n'.decode('utf-8'))
data = ''.join(orig_lines).encode('utf-8')
f = BytesIO(data)
reader = LineReader(f)
lines = list(reader)
assert lines == list(enumerate(orig_lines))
# vim: et sw=4 sts=4
|
Add some tests for the LineReader classimport glob
from mrs.fileformats import LineReader
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
PY3 = sys.version_info[0] == 3
def test_dickens():
with open('tests/data/dickens/split1.txt', 'rb') as f:
reader = LineReader(f)
lines = list(reader)
assert len(lines) == 2
key, value = lines[0]
assert key == 0
assert value == 'It was the best of times,\n'
key, value = lines[1]
assert key == 1
assert value == 'it was the worst of times,\n'
def test_bytesio():
orig_lines = ['line 1\n', 'line 2\n', '3\tline\n']
if PY3:
orig_lines.append('line ∞\n')
else:
orig_lines = [unicode(s) for s in orig_lines]
orig_lines.append('line \xe2\x88\x9e\n'.decode('utf-8'))
data = ''.join(orig_lines).encode('utf-8')
f = BytesIO(data)
reader = LineReader(f)
lines = list(reader)
assert lines == list(enumerate(orig_lines))
# vim: et sw=4 sts=4
|
<commit_before><commit_msg>Add some tests for the LineReader class<commit_after>import glob
from mrs.fileformats import LineReader
import sys
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
PY3 = sys.version_info[0] == 3
def test_dickens():
with open('tests/data/dickens/split1.txt', 'rb') as f:
reader = LineReader(f)
lines = list(reader)
assert len(lines) == 2
key, value = lines[0]
assert key == 0
assert value == 'It was the best of times,\n'
key, value = lines[1]
assert key == 1
assert value == 'it was the worst of times,\n'
def test_bytesio():
orig_lines = ['line 1\n', 'line 2\n', '3\tline\n']
if PY3:
orig_lines.append('line ∞\n')
else:
orig_lines = [unicode(s) for s in orig_lines]
orig_lines.append('line \xe2\x88\x9e\n'.decode('utf-8'))
data = ''.join(orig_lines).encode('utf-8')
f = BytesIO(data)
reader = LineReader(f)
lines = list(reader)
assert lines == list(enumerate(orig_lines))
# vim: et sw=4 sts=4
|
|
9621789c86743f6f75096760827d7c4458973f1a
|
tests/web/test_web_visual_testing.py
|
tests/web/test_web_visual_testing.py
|
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from toolium_examples.test_cases import SeleniumTestCase
from toolium_examples.pageobjects.web.login import LoginPageObject
class Login(SeleniumTestCase):
def test_successful_login_logout(self):
user = {'username': 'tomsmith', 'password': 'SuperSecretPassword!'}
expected_login_message = "You logged into a secure area!"
expected_logout_message = "You logged out of the secure area!"
# Open login form
login_page = LoginPageObject().open()
# Assert the full screen
self.assertFullScreenshot('login_form')
# Assert the full screen excluding a web element
self.assertFullScreenshot('login_form_no_password', exclude_elements=[login_page.password])
# Assert only a web element
self.assertScreenshot(login_page.login_button, 'login_submit_button')
# Login and check welcome message
secure_area = login_page.login(user)
self.assertEqual(expected_login_message, secure_area.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_secure_area')
# Logout and check logout message
login_page = secure_area.logout()
self.assertEqual(expected_logout_message, login_page.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_logout')
|
Add an example test with visual testing asserts
|
Add an example test with visual testing asserts
|
Python
|
apache-2.0
|
Telefonica/toolium-examples
|
Add an example test with visual testing asserts
|
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from toolium_examples.test_cases import SeleniumTestCase
from toolium_examples.pageobjects.web.login import LoginPageObject
class Login(SeleniumTestCase):
def test_successful_login_logout(self):
user = {'username': 'tomsmith', 'password': 'SuperSecretPassword!'}
expected_login_message = "You logged into a secure area!"
expected_logout_message = "You logged out of the secure area!"
# Open login form
login_page = LoginPageObject().open()
# Assert the full screen
self.assertFullScreenshot('login_form')
# Assert the full screen excluding a web element
self.assertFullScreenshot('login_form_no_password', exclude_elements=[login_page.password])
# Assert only a web element
self.assertScreenshot(login_page.login_button, 'login_submit_button')
# Login and check welcome message
secure_area = login_page.login(user)
self.assertEqual(expected_login_message, secure_area.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_secure_area')
# Logout and check logout message
login_page = secure_area.logout()
self.assertEqual(expected_logout_message, login_page.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_logout')
|
<commit_before><commit_msg>Add an example test with visual testing asserts<commit_after>
|
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from toolium_examples.test_cases import SeleniumTestCase
from toolium_examples.pageobjects.web.login import LoginPageObject
class Login(SeleniumTestCase):
def test_successful_login_logout(self):
user = {'username': 'tomsmith', 'password': 'SuperSecretPassword!'}
expected_login_message = "You logged into a secure area!"
expected_logout_message = "You logged out of the secure area!"
# Open login form
login_page = LoginPageObject().open()
# Assert the full screen
self.assertFullScreenshot('login_form')
# Assert the full screen excluding a web element
self.assertFullScreenshot('login_form_no_password', exclude_elements=[login_page.password])
# Assert only a web element
self.assertScreenshot(login_page.login_button, 'login_submit_button')
# Login and check welcome message
secure_area = login_page.login(user)
self.assertEqual(expected_login_message, secure_area.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_secure_area')
# Logout and check logout message
login_page = secure_area.logout()
self.assertEqual(expected_logout_message, login_page.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_logout')
|
Add an example test with visual testing asserts# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from toolium_examples.test_cases import SeleniumTestCase
from toolium_examples.pageobjects.web.login import LoginPageObject
class Login(SeleniumTestCase):
def test_successful_login_logout(self):
user = {'username': 'tomsmith', 'password': 'SuperSecretPassword!'}
expected_login_message = "You logged into a secure area!"
expected_logout_message = "You logged out of the secure area!"
# Open login form
login_page = LoginPageObject().open()
# Assert the full screen
self.assertFullScreenshot('login_form')
# Assert the full screen excluding a web element
self.assertFullScreenshot('login_form_no_password', exclude_elements=[login_page.password])
# Assert only a web element
self.assertScreenshot(login_page.login_button, 'login_submit_button')
# Login and check welcome message
secure_area = login_page.login(user)
self.assertEqual(expected_login_message, secure_area.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_secure_area')
# Logout and check logout message
login_page = secure_area.logout()
self.assertEqual(expected_logout_message, login_page.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_logout')
|
<commit_before><commit_msg>Add an example test with visual testing asserts<commit_after># -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from toolium_examples.test_cases import SeleniumTestCase
from toolium_examples.pageobjects.web.login import LoginPageObject
class Login(SeleniumTestCase):
def test_successful_login_logout(self):
user = {'username': 'tomsmith', 'password': 'SuperSecretPassword!'}
expected_login_message = "You logged into a secure area!"
expected_logout_message = "You logged out of the secure area!"
# Open login form
login_page = LoginPageObject().open()
# Assert the full screen
self.assertFullScreenshot('login_form')
# Assert the full screen excluding a web element
self.assertFullScreenshot('login_form_no_password', exclude_elements=[login_page.password])
# Assert only a web element
self.assertScreenshot(login_page.login_button, 'login_submit_button')
# Login and check welcome message
secure_area = login_page.login(user)
self.assertEqual(expected_login_message, secure_area.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_secure_area')
# Logout and check logout message
login_page = secure_area.logout()
self.assertEqual(expected_logout_message, login_page.message.get_message())
# Assert the full screen
self.assertFullScreenshot('login_logout')
|
|
7bd561d72fc79927ed8960f2528af671b0fc45c8
|
gwtsa/imports/import_series.py
|
gwtsa/imports/import_series.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 10 11:33:13 2016
@author: ruben
"""
from gwtsa.imports.dinodata import DinoGrondwaterstand
from gwtsa.imports.knmidata import KnmiStation
from pyproj import Proj, transform
class ImportSeries:
def __init__(self, fname, filetype, variable = None):
if filetype=='dino':
dino = DinoGrondwaterstand(fname)
self.series=dino.stand
self.xy=(dino.x, dino.y)
self.latlon=self.rd2wgs(self.xy)
self.meta=dino.meta[-1]
elif filetype=='knmi':
knmi = KnmiStation.fromfile(fname)
self.series=knmi.data[variable]
self.latlon=(knmi.stations['LAT_north'][0],
knmi.stations['LON_east'][0])
names=knmi.stations.dtype.names
self.meta=dict(zip(names,knmi.stations[0]))
elif filetype=='usgs':
# not implemented yet
pass
elif filetype=='csv':
# not implemented yet
pass
else:
            raise Exception('Unknown filetype')
def rd2wgs(self,xy):
outProj = Proj(init='epsg:4326')
inProj = Proj(init='epsg:28992')
lon,lat = transform(inProj,outProj,xy[0],xy[1])
return (lat,lon)
|
Add import file from Ruben
|
Add import file from Ruben
|
Python
|
mit
|
pastas/pasta,gwtsa/gwtsa,pastas/pastas
|
Add import file from Ruben
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 10 11:33:13 2016
@author: ruben
"""
from gwtsa.imports.dinodata import DinoGrondwaterstand
from gwtsa.imports.knmidata import KnmiStation
from pyproj import Proj, transform
class ImportSeries:
def __init__(self, fname, filetype, variable = None):
if filetype=='dino':
dino = DinoGrondwaterstand(fname)
self.series=dino.stand
self.xy=(dino.x, dino.y)
self.latlon=self.rd2wgs(self.xy)
self.meta=dino.meta[-1]
elif filetype=='knmi':
knmi = KnmiStation.fromfile(fname)
self.series=knmi.data[variable]
self.latlon=(knmi.stations['LAT_north'][0],
knmi.stations['LON_east'][0])
names=knmi.stations.dtype.names
self.meta=dict(zip(names,knmi.stations[0]))
elif filetype=='usgs':
# not implemented yet
pass
elif filetype=='csv':
# not implemented yet
pass
else:
            raise Exception('Unknown filetype')
def rd2wgs(self,xy):
outProj = Proj(init='epsg:4326')
inProj = Proj(init='epsg:28992')
lon,lat = transform(inProj,outProj,xy[0],xy[1])
return (lat,lon)
|
<commit_before><commit_msg>Add import file from Ruben<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 10 11:33:13 2016
@author: ruben
"""
from gwtsa.imports.dinodata import DinoGrondwaterstand
from gwtsa.imports.knmidata import KnmiStation
from pyproj import Proj, transform
class ImportSeries:
def __init__(self, fname, filetype, variable = None):
if filetype=='dino':
dino = DinoGrondwaterstand(fname)
self.series=dino.stand
self.xy=(dino.x, dino.y)
self.latlon=self.rd2wgs(self.xy)
self.meta=dino.meta[-1]
elif filetype=='knmi':
knmi = KnmiStation.fromfile(fname)
self.series=knmi.data[variable]
self.latlon=(knmi.stations['LAT_north'][0],
knmi.stations['LON_east'][0])
names=knmi.stations.dtype.names
self.meta=dict(zip(names,knmi.stations[0]))
elif filetype=='usgs':
# not implemented yet
pass
elif filetype=='csv':
# not implemented yet
pass
else:
            raise Exception('Unknown filetype')
def rd2wgs(self,xy):
outProj = Proj(init='epsg:4326')
inProj = Proj(init='epsg:28992')
lon,lat = transform(inProj,outProj,xy[0],xy[1])
return (lat,lon)
|
Add import file from Ruben# -*- coding: utf-8 -*-
"""
Created on Tue May 10 11:33:13 2016
@author: ruben
"""
from gwtsa.imports.dinodata import DinoGrondwaterstand
from gwtsa.imports.knmidata import KnmiStation
from pyproj import Proj, transform
class ImportSeries:
def __init__(self, fname, filetype, variable = None):
if filetype=='dino':
dino = DinoGrondwaterstand(fname)
self.series=dino.stand
self.xy=(dino.x, dino.y)
self.latlon=self.rd2wgs(self.xy)
self.meta=dino.meta[-1]
elif filetype=='knmi':
knmi = KnmiStation.fromfile(fname)
self.series=knmi.data[variable]
self.latlon=(knmi.stations['LAT_north'][0],
knmi.stations['LON_east'][0])
names=knmi.stations.dtype.names
self.meta=dict(zip(names,knmi.stations[0]))
elif filetype=='usgs':
# not implemented yet
pass
elif filetype=='csv':
# not implemented yet
pass
else:
            raise Exception('Unknown filetype')
def rd2wgs(self,xy):
outProj = Proj(init='epsg:4326')
inProj = Proj(init='epsg:28992')
lon,lat = transform(inProj,outProj,xy[0],xy[1])
return (lat,lon)
|
<commit_before><commit_msg>Add import file from Ruben<commit_after># -*- coding: utf-8 -*-
"""
Created on Tue May 10 11:33:13 2016
@author: ruben
"""
from gwtsa.imports.dinodata import DinoGrondwaterstand
from gwtsa.imports.knmidata import KnmiStation
from pyproj import Proj, transform
class ImportSeries:
def __init__(self, fname, filetype, variable = None):
if filetype=='dino':
dino = DinoGrondwaterstand(fname)
self.series=dino.stand
self.xy=(dino.x, dino.y)
self.latlon=self.rd2wgs(self.xy)
self.meta=dino.meta[-1]
elif filetype=='knmi':
knmi = KnmiStation.fromfile(fname)
self.series=knmi.data[variable]
self.latlon=(knmi.stations['LAT_north'][0],
knmi.stations['LON_east'][0])
names=knmi.stations.dtype.names
self.meta=dict(zip(names,knmi.stations[0]))
elif filetype=='usgs':
# not implemented yet
pass
elif filetype=='csv':
# not implemented yet
pass
else:
            raise Exception('Unknown filetype')
def rd2wgs(self,xy):
outProj = Proj(init='epsg:4326')
inProj = Proj(init='epsg:28992')
lon,lat = transform(inProj,outProj,xy[0],xy[1])
return (lat,lon)
|
|
10dfd1ca4ab8e3bd7956c60a98abb31d3fb58405
|
tests/test_multi_open_file.py
|
tests/test_multi_open_file.py
|
import nose
import angr
import os
import logging
l = logging.getLogger("angr_tests")
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_test_file_struct_funcs():
test_bin = os.path.join(test_location, "../../binaries/tests/x86_64/test_multi_open_file")
b = angr.Project(test_bin)
pg = b.factory.path_group()
pg.active[0].state.options.discard("LAZY_SOLVES")
pg.explore()
nose.tools.assert_equal(len(pg.deadended), 1)
for p in pg.deadended:
nose.tools.assert_true(p.state.posix.dumps(2) == "")
def test_file_struct_funcs():
yield run_test_file_struct_funcs
if __name__ == "__main__":
run_test_file_struct_funcs()
|
Test for opening same file multiple times
|
Test for opening same file multiple times
|
Python
|
bsd-2-clause
|
f-prettyland/angr,axt/angr,angr/angr,axt/angr,iamahuman/angr,schieb/angr,iamahuman/angr,tyb0807/angr,schieb/angr,schieb/angr,f-prettyland/angr,chubbymaggie/angr,angr/angr,angr/angr,tyb0807/angr,axt/angr,tyb0807/angr,f-prettyland/angr,iamahuman/angr,chubbymaggie/angr,chubbymaggie/angr
|
Test for opening same file multiple times
|
import nose
import angr
import os
import logging
l = logging.getLogger("angr_tests")
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_test_file_struct_funcs():
test_bin = os.path.join(test_location, "../../binaries/tests/x86_64/test_multi_open_file")
b = angr.Project(test_bin)
pg = b.factory.path_group()
pg.active[0].state.options.discard("LAZY_SOLVES")
pg.explore()
nose.tools.assert_equal(len(pg.deadended), 1)
for p in pg.deadended:
nose.tools.assert_true(p.state.posix.dumps(2) == "")
def test_file_struct_funcs():
yield run_test_file_struct_funcs
if __name__ == "__main__":
run_test_file_struct_funcs()
|
<commit_before><commit_msg>Test for opening same file multiple times<commit_after>
|
import nose
import angr
import os
import logging
l = logging.getLogger("angr_tests")
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_test_file_struct_funcs():
test_bin = os.path.join(test_location, "../../binaries/tests/x86_64/test_multi_open_file")
b = angr.Project(test_bin)
pg = b.factory.path_group()
pg.active[0].state.options.discard("LAZY_SOLVES")
pg.explore()
nose.tools.assert_equal(len(pg.deadended), 1)
for p in pg.deadended:
nose.tools.assert_true(p.state.posix.dumps(2) == "")
def test_file_struct_funcs():
yield run_test_file_struct_funcs
if __name__ == "__main__":
run_test_file_struct_funcs()
|
Test for opening same file multiple timesimport nose
import angr
import os
import logging
l = logging.getLogger("angr_tests")
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_test_file_struct_funcs():
test_bin = os.path.join(test_location, "../../binaries/tests/x86_64/test_multi_open_file")
b = angr.Project(test_bin)
pg = b.factory.path_group()
pg.active[0].state.options.discard("LAZY_SOLVES")
pg.explore()
nose.tools.assert_equal(len(pg.deadended), 1)
for p in pg.deadended:
nose.tools.assert_true(p.state.posix.dumps(2) == "")
def test_file_struct_funcs():
yield run_test_file_struct_funcs
if __name__ == "__main__":
run_test_file_struct_funcs()
|
<commit_before><commit_msg>Test for opening same file multiple times<commit_after>import nose
import angr
import os
import logging
l = logging.getLogger("angr_tests")
test_location = str(os.path.dirname(os.path.realpath(__file__)))
def run_test_file_struct_funcs():
test_bin = os.path.join(test_location, "../../binaries/tests/x86_64/test_multi_open_file")
b = angr.Project(test_bin)
pg = b.factory.path_group()
pg.active[0].state.options.discard("LAZY_SOLVES")
pg.explore()
nose.tools.assert_equal(len(pg.deadended), 1)
for p in pg.deadended:
nose.tools.assert_true(p.state.posix.dumps(2) == "")
def test_file_struct_funcs():
yield run_test_file_struct_funcs
if __name__ == "__main__":
run_test_file_struct_funcs()
|
|
e869407e1b8c9a7e8d3d1ad1578bb83541dd7ffa
|
tests/test_07_client_clone.py
|
tests/test_07_client_clone.py
|
"""Test :meth:`icat.client.Client.clone`.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
from conftest import getConfig
def test_clone_minimal(setupicat):
"""Clone a simple client. Not logged in, no ids.
"""
client, conf = getConfig(ids=False)
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids is None
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_ids(setupicat):
"""Same as above, but configure ids this time.
"""
client, conf = getConfig(ids="mandatory")
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids.url == client.ids.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_login(setupicat):
"""Clone a client that is logged in.
The clone should not share the session. Original client and clone
    should be able to log in and out without interfering with the other.
"""
client, conf = getConfig()
client.login(conf.auth, conf.credentials)
clone = client.clone()
assert clone.url == client.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
assert clone.sessionId is None, "the clone must not inherit the session"
    # The clone may start its own session
clone.login(conf.auth, conf.credentials)
assert clone.sessionId
assert clone.sessionId != client.sessionId
# both are still logged in as the same user
assert clone.getUserName() == client.getUserName()
# Now logout the clone. This must not affect the client's session.
clone.logout()
assert clone.sessionId is None
assert client.sessionId
|
Add a test for Client.clone().
|
Add a test for Client.clone().
|
Python
|
apache-2.0
|
icatproject/python-icat
|
Add a test for Client.clone().
|
"""Test :meth:`icat.client.Client.clone`.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
from conftest import getConfig
def test_clone_minimal(setupicat):
"""Clone a simple client. Not logged in, no ids.
"""
client, conf = getConfig(ids=False)
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids is None
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_ids(setupicat):
"""Same as above, but configure ids this time.
"""
client, conf = getConfig(ids="mandatory")
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids.url == client.ids.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_login(setupicat):
"""Clone a client that is logged in.
The clone should not share the session. Original client and clone
    should be able to log in and out without interfering with the other.
"""
client, conf = getConfig()
client.login(conf.auth, conf.credentials)
clone = client.clone()
assert clone.url == client.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
assert clone.sessionId is None, "the clone must not inherit the session"
    # The clone may start its own session
clone.login(conf.auth, conf.credentials)
assert clone.sessionId
assert clone.sessionId != client.sessionId
# both are still logged in as the same user
assert clone.getUserName() == client.getUserName()
# Now logout the clone. This must not affect the client's session.
clone.logout()
assert clone.sessionId is None
assert client.sessionId
|
<commit_before><commit_msg>Add a test for Client.clone().<commit_after>
|
"""Test :meth:`icat.client.Client.clone`.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
from conftest import getConfig
def test_clone_minimal(setupicat):
"""Clone a simple client. Not logged in, no ids.
"""
client, conf = getConfig(ids=False)
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids is None
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_ids(setupicat):
"""Same as above, but configure ids this time.
"""
client, conf = getConfig(ids="mandatory")
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids.url == client.ids.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_login(setupicat):
"""Clone a client that is logged in.
The clone should not share the session. Original client and clone
    should be able to log in and out without interfering with the other.
"""
client, conf = getConfig()
client.login(conf.auth, conf.credentials)
clone = client.clone()
assert clone.url == client.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
assert clone.sessionId is None, "the clone must not inherit the session"
    # The clone may start its own session
clone.login(conf.auth, conf.credentials)
assert clone.sessionId
assert clone.sessionId != client.sessionId
# both are still logged in as the same user
assert clone.getUserName() == client.getUserName()
# Now logout the clone. This must not affect the client's session.
clone.logout()
assert clone.sessionId is None
assert client.sessionId
|
Add a test for Client.clone()."""Test :meth:`icat.client.Client.clone`.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
from conftest import getConfig
def test_clone_minimal(setupicat):
"""Clone a simple client. Not logged in, no ids.
"""
client, conf = getConfig(ids=False)
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids is None
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_ids(setupicat):
"""Same as above, but configure ids this time.
"""
client, conf = getConfig(ids="mandatory")
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids.url == client.ids.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_login(setupicat):
"""Clone a client that is logged in.
The clone should not share the session. Original client and clone
    should be able to log in and out without interfering with the other.
"""
client, conf = getConfig()
client.login(conf.auth, conf.credentials)
clone = client.clone()
assert clone.url == client.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
assert clone.sessionId is None, "the clone must not inherit the session"
    # The clone may start its own session
clone.login(conf.auth, conf.credentials)
assert clone.sessionId
assert clone.sessionId != client.sessionId
# both are still logged in as the same user
assert clone.getUserName() == client.getUserName()
# Now logout the clone. This must not affect the client's session.
clone.logout()
assert clone.sessionId is None
assert client.sessionId
|
<commit_before><commit_msg>Add a test for Client.clone().<commit_after>"""Test :meth:`icat.client.Client.clone`.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
from conftest import getConfig
def test_clone_minimal(setupicat):
"""Clone a simple client. Not logged in, no ids.
"""
client, conf = getConfig(ids=False)
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids is None
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_ids(setupicat):
"""Same as above, but configure ids this time.
"""
client, conf = getConfig(ids="mandatory")
clone = client.clone()
assert isinstance(clone, icat.client.Client)
assert clone.url == client.url
assert clone.ids.url == client.ids.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
def test_clone_login(setupicat):
"""Clone a client that is logged in.
The clone should not share the session. Original client and clone
    should be able to log in and out without interfering with the other.
"""
client, conf = getConfig()
client.login(conf.auth, conf.credentials)
clone = client.clone()
assert clone.url == client.url
assert clone.kwargs == client.kwargs
assert clone.apiversion == client.apiversion
assert clone.sessionId is None, "the clone must not inherit the session"
    # The clone may start its own session
clone.login(conf.auth, conf.credentials)
assert clone.sessionId
assert clone.sessionId != client.sessionId
# both are still logged in as the same user
assert clone.getUserName() == client.getUserName()
# Now logout the clone. This must not affect the client's session.
clone.logout()
assert clone.sessionId is None
assert client.sessionId
|
|
3c4c0ed8c5b3e6b64e360d6c9a2ef6937a0a5269
|
source/tyr/tests/integration/endpoints_test.py
|
source/tyr/tests/integration/endpoints_test.py
|
# coding: utf-8
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from tests.check_utils import api_get
from tyr import app
def test_status_ok():
"""
Test that the endpoint '/status' returns the db version
"""
resp = api_get('/v0/status/')
assert 'db version' in resp
assert resp['db version'] is not None
def test_index_ok():
"""
Test that the endpoint '/' returns a list of endpoints with their url
"""
resp = api_get('/')
assert resp is not None
for endpoint in resp.keys():
assert 'href' in resp[endpoint]
|
Add tests for '/' and '/status'
|
Add tests for '/' and '/status'
|
Python
|
agpl-3.0
|
pbougue/navitia,kinnou02/navitia,CanalTP/navitia,xlqian/navitia,pbougue/navitia,CanalTP/navitia,Tisseo/navitia,xlqian/navitia,CanalTP/navitia,CanalTP/navitia,Tisseo/navitia,kinnou02/navitia,xlqian/navitia,Tisseo/navitia,xlqian/navitia,pbougue/navitia,pbougue/navitia,CanalTP/navitia,kinnou02/navitia,Tisseo/navitia,Tisseo/navitia,xlqian/navitia,kinnou02/navitia
|
Add tests for '/' and '/status'
|
# coding: utf-8
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from tests.check_utils import api_get
from tyr import app
def test_status_ok():
"""
Test that the endpoint '/status' returns the db version
"""
resp = api_get('/v0/status/')
assert 'db version' in resp
assert resp['db version'] is not None
def test_index_ok():
"""
Test that the endpoint '/' returns a list of endpoints with their url
"""
resp = api_get('/')
assert resp is not None
for endpoint in resp.keys():
assert 'href' in resp[endpoint]
|
<commit_before><commit_msg>Add tests for '/' and '/status'<commit_after>
|
# coding: utf-8
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from tests.check_utils import api_get
from tyr import app
def test_status_ok():
"""
Test that the endpoint '/status' returns the db version
"""
resp = api_get('/v0/status/')
assert 'db version' in resp
assert resp['db version'] is not None
def test_index_ok():
"""
Test that the endpoint '/' returns a list of endpoints with their url
"""
resp = api_get('/')
assert resp is not None
for endpoint in resp.keys():
assert 'href' in resp[endpoint]
|
Add tests for '/' and '/status'# coding: utf-8
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from tests.check_utils import api_get
from tyr import app
def test_status_ok():
"""
Test that the endpoint '/status' returns the db version
"""
resp = api_get('/v0/status/')
assert 'db version' in resp
assert resp['db version'] is not None
def test_index_ok():
"""
Test that the endpoint '/' returns a list of endpoints with their url
"""
resp = api_get('/')
assert resp is not None
for endpoint in resp.keys():
assert 'href' in resp[endpoint]
|
<commit_before><commit_msg>Add tests for '/' and '/status'<commit_after># coding: utf-8
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from tests.check_utils import api_get
from tyr import app
def test_status_ok():
"""
Test that the endpoint '/status' returns the db version
"""
resp = api_get('/v0/status/')
assert 'db version' in resp
assert resp['db version'] is not None
def test_index_ok():
"""
Test that the endpoint '/' returns a list of endpoints with their url
"""
resp = api_get('/')
assert resp is not None
for endpoint in resp.keys():
assert 'href' in resp[endpoint]
|
|
daca76320aa72d03a1372139be234d03ccf8641b
|
sklearn/utils/ransac.py
|
sklearn/utils/ransac.py
|
import numpy as np
def ransac(X, y, estimator_cls, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=1, estimator_kwargs={}):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
"""
best_estimator = None
best_n_inliers = 0
best_score = np.inf
best_inlier_mask = None
best_inlier_X = None
best_inlier_y = None
# estimator used for all iterations and for output
estimator = estimator_cls(**estimator_kwargs)
# number of data samples
n_samples = X.shape[0]
for _ in range(max_trials):
# choose random sample set
random_idxs = np.random.randint(0, n_samples, min_samples)
rsample_X = X[random_idxs]
rsample_y = y[random_idxs]
# check if random sample set is valid
if is_data_valid is not None and not is_data_valid(X, y):
continue
# fit model for current random sample set
estimator.fit(rsample_X, rsample_y)
# check if estimated model is valid
if is_model_valid is not None and not is_model_valid(estimator,
rsample_X,
rsample_y):
continue
# residuals of all data for current random sample model
rsample_residuals = np.abs(estimator.predict(rsample_X) - rsample_y)
# classify data into inliers and outliers
rsample_inlier_mask = rsample_residuals < residual_threshold
rsample_n_inliers = np.sum(rsample_inlier_mask)
# less inliers -> skip current random sample
if rsample_n_inliers < best_n_inliers:
continue
# extract inlier data set
rsample_inlier_X = X[rsample_inlier_mask]
rsample_inlier_y = y[rsample_inlier_mask]
# score of inlier data set
rsample_score = estimator.score(rsample_inlier_X, rsample_inlier_y)
# same number of inliers but worse score -> skip current random sample
if rsample_n_inliers == best_n_inliers and rsample_score < best_score:
continue
# save current random sample as best sample
best_n_inliers = rsample_n_inliers
best_score = rsample_score
best_inlier_mask = rsample_inlier_mask
best_inlier_X = rsample_inlier_X
best_inlier_y = rsample_inlier_y
# break if sufficient number of inliers or score is reached
if best_n_inliers >= stop_n_inliers or best_score >= stop_score:
break
# if none of the iterations met the required criteria
if best_inlier_mask is None:
return None, None
# estimate final model using all inliers
estimator.fit(best_inlier_X, best_inlier_y)
return estimator, best_inlier_mask
|
Add implementation of RANSAC algorithm
|
Add implementation of RANSAC algorithm
|
Python
|
bsd-3-clause
|
Garrett-R/scikit-learn,nesterione/scikit-learn,procoder317/scikit-learn,rishikksh20/scikit-learn,Myasuka/scikit-learn,evgchz/scikit-learn,mugizico/scikit-learn,AlexanderFabisch/scikit-learn,Windy-Ground/scikit-learn,Barmaley-exe/scikit-learn,kjung/scikit-learn,yanlend/scikit-learn,nhejazi/scikit-learn,treycausey/scikit-learn,djgagne/scikit-learn,pianomania/scikit-learn,aflaxman/scikit-learn,billy-inn/scikit-learn,kjung/scikit-learn,pompiduskus/scikit-learn,ilyes14/scikit-learn,michigraber/scikit-learn,lenovor/scikit-learn,nvoron23/scikit-learn,MartinSavc/scikit-learn,ycaihua/scikit-learn,aetilley/scikit-learn,manhhomienbienthuy/scikit-learn,herilalaina/scikit-learn,Garrett-R/scikit-learn,vortex-ape/scikit-learn,fbagirov/scikit-learn,nrhine1/scikit-learn,glennq/scikit-learn,PatrickChrist/scikit-learn,meduz/scikit-learn,UNR-AERIAL/scikit-learn,OshynSong/scikit-learn,vibhorag/scikit-learn,maheshakya/scikit-learn,vibhorag/scikit-learn,zhenv5/scikit-learn,PatrickChrist/scikit-learn,pompiduskus/scikit-learn,ngoix/OCRF,MohammedWasim/scikit-learn,ahoyosid/scikit-learn,gotomypc/scikit-learn,ngoix/OCRF,devanshdalal/scikit-learn,mikebenfield/scikit-learn,potash/scikit-learn,waterponey/scikit-learn,pianomania/scikit-learn,rvraghav93/scikit-learn,anurag313/scikit-learn,spallavolu/scikit-learn,MartinDelzant/scikit-learn,Vimos/scikit-learn,glennq/scikit-learn,zhenv5/scikit-learn,eickenberg/scikit-learn,trankmichael/scikit-learn,trungnt13/scikit-learn,RachitKansal/scikit-learn,shyamalschandra/scikit-learn,JPFrancoia/scikit-learn,sergeyf/scikit-learn,PrashntS/scikit-learn,mehdidc/scikit-learn,jlegendary/scikit-learn,kylerbrown/scikit-learn,djgagne/scikit-learn,samzhang111/scikit-learn,sanketloke/scikit-learn,sarahgrogan/scikit-learn,h2educ/scikit-learn,eg-zhang/scikit-learn,phdowling/scikit-learn,henridwyer/scikit-learn,wzbozon/scikit-learn,xiaoxiamii/scikit-learn,petosegan/scikit-learn,ZENGXH/scikit-learn,wazeerzulfikar/scikit-learn,mattgiguere/scikit-learn,Akshay0724/scikit-learn,simon-pepin/scikit-learn,aabadie/scikit-learn,sanketloke/scikit-learn,kagayakidan/scikit-learn,Djabbz/scikit-learn,xubenben/scikit-learn,AIML/scikit-learn,bigdataelephants/scikit-learn,schets/scikit-learn,larsmans/scikit-learn,rajat1994/scikit-learn,xyguo/scikit-learn,victorbergelin/scikit-learn,AlexanderFabisch/scikit-learn,robin-lai/scikit-learn,pianomania/scikit-learn,IndraVikas/scikit-learn,aabadie/scikit-learn,vivekmishra1991/scikit-learn,murali-munna/scikit-learn,Lawrence-Liu/scikit-learn,rrohan/scikit-learn,potash/scikit-learn,q1ang/scikit-learn,f3r/scikit-learn,phdowling/scikit-learn,anurag313/scikit-learn,rohanp/scikit-learn,Akshay0724/scikit-learn,fbagirov/scikit-learn,voxlol/scikit-learn,alvarofierroclavero/scikit-learn,fredhusser/scikit-learn,khkaminska/scikit-learn,ElDeveloper/scikit-learn,pnedunuri/scikit-learn,UNR-AERIAL/scikit-learn,wzbozon/scikit-learn,xavierwu/scikit-learn,xuewei4d/scikit-learn,plissonf/scikit-learn,r-mart/scikit-learn,lin-credible/scikit-learn,JsNoNo/scikit-learn,mattgiguere/scikit-learn,NelisVerhoef/scikit-learn,Garrett-R/scikit-learn,huobaowangxi/scikit-learn,MatthieuBizien/scikit-learn,andrewnc/scikit-learn,Garrett-R/scikit-learn,anntzer/scikit-learn,trungnt13/scikit-learn,smartscheduling/scikit-learn-categorical-tree,cybernet14/scikit-learn,untom/scikit-learn,Aasmi/scikit-learn,jmetzen/scikit-learn,kaichogami/scikit-learn,potash/scikit-learn,anirudhjayaraman/scikit-learn,jm-begon/scikit-learn,mattilyra/scikit-learn,pypot/scikit-learn,sumspr/scikit-learn,hugobowne/scikit-learn,ChanChiChoi/sc
ikit-learn,costypetrisor/scikit-learn,shikhardb/scikit-learn,altairpearl/scikit-learn,ivannz/scikit-learn,etkirsch/scikit-learn,JeanKossaifi/scikit-learn,amueller/scikit-learn,wlamond/scikit-learn,yonglehou/scikit-learn,plissonf/scikit-learn,tomlof/scikit-learn,mojoboss/scikit-learn,Obus/scikit-learn,Sentient07/scikit-learn,xzh86/scikit-learn,pkruskal/scikit-learn,ltiao/scikit-learn,samzhang111/scikit-learn,RomainBrault/scikit-learn,0asa/scikit-learn,MartinSavc/scikit-learn,aabadie/scikit-learn,kashif/scikit-learn,Srisai85/scikit-learn,nomadcube/scikit-learn,espg/scikit-learn,joernhees/scikit-learn,jakobworldpeace/scikit-learn,appapantula/scikit-learn,walterreade/scikit-learn,glemaitre/scikit-learn,raghavrv/scikit-learn,wzbozon/scikit-learn,tomlof/scikit-learn,ilo10/scikit-learn,hrjn/scikit-learn,belltailjp/scikit-learn,aminert/scikit-learn,bhargav/scikit-learn,davidgbe/scikit-learn,rajat1994/scikit-learn,moutai/scikit-learn,chrsrds/scikit-learn,zaxtax/scikit-learn,tdhopper/scikit-learn,ElDeveloper/scikit-learn,andaag/scikit-learn,akionakamura/scikit-learn,YinongLong/scikit-learn,robin-lai/scikit-learn,alvarofierroclavero/scikit-learn,qifeigit/scikit-learn,bikong2/scikit-learn,moutai/scikit-learn,mblondel/scikit-learn,Windy-Ground/scikit-learn,siutanwong/scikit-learn,ZenDevelopmentSystems/scikit-learn,jmschrei/scikit-learn,ivannz/scikit-learn,RayMick/scikit-learn,loli/semisupervisedforests,Adai0808/scikit-learn,andrewnc/scikit-learn,betatim/scikit-learn,ogrisel/scikit-learn,fredhusser/scikit-learn,mjgrav2001/scikit-learn,cauchycui/scikit-learn,abhishekkrthakur/scikit-learn,spallavolu/scikit-learn,ldirer/scikit-learn,glouppe/scikit-learn,maheshakya/scikit-learn,rohanp/scikit-learn,anntzer/scikit-learn,zihua/scikit-learn,lesteve/scikit-learn,pianomania/scikit-learn,thientu/scikit-learn,tmhm/scikit-learn,YinongLong/scikit-learn,treycausey/scikit-learn,3manuek/scikit-learn,liyu1990/sklearn,arjoly/scikit-learn,luo66/scikit-learn,Sentient07/scikit-learn,marcocaccin/scikit-learn,DonBeo/scikit-learn,fzalkow/scikit-learn,walterreade/scikit-learn,carrillo/scikit-learn,MechCoder/scikit-learn,aetilley/scikit-learn,mxjl620/scikit-learn,gotomypc/scikit-learn,yanlend/scikit-learn,aminert/scikit-learn,dingocuster/scikit-learn,huobaowangxi/scikit-learn,mhue/scikit-learn,huobaowangxi/scikit-learn,bthirion/scikit-learn,samzhang111/scikit-learn,DSLituiev/scikit-learn,abhishekgahlot/scikit-learn,mblondel/scikit-learn,costypetrisor/scikit-learn,ogrisel/scikit-learn,HolgerPeters/scikit-learn,untom/scikit-learn,fabioticconi/scikit-learn,vigilv/scikit-learn,dsquareindia/scikit-learn,zihua/scikit-learn,AlexRobson/scikit-learn,joernhees/scikit-learn,Nyker510/scikit-learn,mfjb/scikit-learn,LohithBlaze/scikit-learn,pratapvardhan/scikit-learn,0x0all/scikit-learn,ashhher3/scikit-learn,sinhrks/scikit-learn,massmutual/scikit-learn,tawsifkhan/scikit-learn,krez13/scikit-learn,vshtanko/scikit-learn,arabenjamin/scikit-learn,sarahgrogan/scikit-learn,larsmans/scikit-learn,kevin-intel/scikit-learn,hrjn/scikit-learn,hdmetor/scikit-learn,idlead/scikit-learn,yyjiang/scikit-learn,ElDeveloper/scikit-learn,wazeerzulfikar/scikit-learn,AlexRobson/scikit-learn,krez13/scikit-learn,xiaoxiamii/scikit-learn,mojoboss/scikit-learn,sanketloke/scikit-learn,hsuantien/scikit-learn,hsiaoyi0504/scikit-learn,btabibian/scikit-learn,loli/semisupervisedforests,nomadcube/scikit-learn,glemaitre/scikit-learn,vybstat/scikit-learn,glemaitre/scikit-learn,ssaeger/scikit-learn,pratapvardhan/scikit-learn,roxyboy/scikit-learn,vinayak-mehta/scikit-learn,ycaihua/sci
kit-learn,vibhorag/scikit-learn,abhishekkrthakur/scikit-learn,JosmanPS/scikit-learn,toastedcornflakes/scikit-learn,tomlof/scikit-learn,Akshay0724/scikit-learn,xiaoxiamii/scikit-learn,btabibian/scikit-learn,nikitasingh981/scikit-learn,anntzer/scikit-learn,shangwuhencc/scikit-learn,xavierwu/scikit-learn,jorik041/scikit-learn,rexshihaoren/scikit-learn,bhargav/scikit-learn,sonnyhu/scikit-learn,Nyker510/scikit-learn,sanketloke/scikit-learn,procoder317/scikit-learn,jlegendary/scikit-learn,dingocuster/scikit-learn,lenovor/scikit-learn,mattilyra/scikit-learn,mlyundin/scikit-learn,sinhrks/scikit-learn,abhishekkrthakur/scikit-learn,pv/scikit-learn,glennq/scikit-learn,shusenl/scikit-learn,3manuek/scikit-learn,potash/scikit-learn,andrewnc/scikit-learn,victorbergelin/scikit-learn,jakobworldpeace/scikit-learn,MohammedWasim/scikit-learn,cainiaocome/scikit-learn,bhargav/scikit-learn,nelson-liu/scikit-learn,shangwuhencc/scikit-learn,xavierwu/scikit-learn,mehdidc/scikit-learn,appapantula/scikit-learn,pkruskal/scikit-learn,ephes/scikit-learn,shikhardb/scikit-learn,sonnyhu/scikit-learn,bigdataelephants/scikit-learn,AlexandreAbraham/scikit-learn,AlexandreAbraham/scikit-learn,jorge2703/scikit-learn,thientu/scikit-learn,fabianp/scikit-learn,yask123/scikit-learn,AlexandreAbraham/scikit-learn,jseabold/scikit-learn,CforED/Machine-Learning,herilalaina/scikit-learn,mhue/scikit-learn,harshaneelhg/scikit-learn,plissonf/scikit-learn,adamgreenhall/scikit-learn,robbymeals/scikit-learn,TomDLT/scikit-learn,mehdidc/scikit-learn,cybernet14/scikit-learn,AIML/scikit-learn,rohanp/scikit-learn,iismd17/scikit-learn,huzq/scikit-learn,bthirion/scikit-learn,Myasuka/scikit-learn,yask123/scikit-learn,AlexandreAbraham/scikit-learn,henridwyer/scikit-learn,Djabbz/scikit-learn,jorge2703/scikit-learn,devanshdalal/scikit-learn,tmhm/scikit-learn,JPFrancoia/scikit-learn,Jimmy-Morzaria/scikit-learn,ElDeveloper/scikit-learn,betatim/scikit-learn,nmayorov/scikit-learn,deepesch/scikit-learn,aewhatley/scikit-learn,costypetrisor/scikit-learn,jkarnows/scikit-learn,xwolf12/scikit-learn,cauchycui/scikit-learn,Fireblend/scikit-learn,fredhusser/scikit-learn,elkingtonmcb/scikit-learn,abhishekgahlot/scikit-learn,CVML/scikit-learn,macks22/scikit-learn,justincassidy/scikit-learn,equialgo/scikit-learn,michigraber/scikit-learn,jzt5132/scikit-learn,bigdataelephants/scikit-learn,krez13/scikit-learn,lazywei/scikit-learn,russel1237/scikit-learn,thientu/scikit-learn,giorgiop/scikit-learn,khkaminska/scikit-learn,pkruskal/scikit-learn,Titan-C/scikit-learn,Aasmi/scikit-learn,stylianos-kampakis/scikit-learn,waterponey/scikit-learn,Titan-C/scikit-learn,fyffyt/scikit-learn,ashhher3/scikit-learn,TomDLT/scikit-learn,mojoboss/scikit-learn,sonnyhu/scikit-learn,abhishekgahlot/scikit-learn,kaichogami/scikit-learn,khkaminska/scikit-learn,lesteve/scikit-learn,qifeigit/scikit-learn,tosolveit/scikit-learn,ngoix/OCRF,meduz/scikit-learn,espg/scikit-learn,mjudsp/Tsallis,tdhopper/scikit-learn,dsquareindia/scikit-learn,liberatorqjw/scikit-learn,Adai0808/scikit-learn,imaculate/scikit-learn,toastedcornflakes/scikit-learn,yunfeilu/scikit-learn,JosmanPS/scikit-learn,petosegan/scikit-learn,bikong2/scikit-learn,frank-tancf/scikit-learn,Achuth17/scikit-learn,dsullivan7/scikit-learn,adamgreenhall/scikit-learn,loli/sklearn-ensembletrees,DSLituiev/scikit-learn,jakirkham/scikit-learn,fzalkow/scikit-learn,jorik041/scikit-learn,hainm/scikit-learn,saiwing-yeung/scikit-learn,heli522/scikit-learn,3manuek/scikit-learn,trungnt13/scikit-learn,belltailjp/scikit-learn,aewhatley/scikit-learn,altairpearl/sciki
t-learn,alexsavio/scikit-learn,giorgiop/scikit-learn,xuewei4d/scikit-learn,Barmaley-exe/scikit-learn,IshankGulati/scikit-learn,dingocuster/scikit-learn,rahuldhote/scikit-learn,ahoyosid/scikit-learn,mattilyra/scikit-learn,bnaul/scikit-learn,marcocaccin/scikit-learn,quheng/scikit-learn,ClimbsRocks/scikit-learn,liberatorqjw/scikit-learn,ZenDevelopmentSystems/scikit-learn,hrjn/scikit-learn,mayblue9/scikit-learn,loli/sklearn-ensembletrees,ZENGXH/scikit-learn,idlead/scikit-learn,0asa/scikit-learn,anirudhjayaraman/scikit-learn,bhargav/scikit-learn,yyjiang/scikit-learn,RPGOne/scikit-learn,alexsavio/scikit-learn,heli522/scikit-learn,vermouthmjl/scikit-learn,bthirion/scikit-learn,mjudsp/Tsallis,hainm/scikit-learn,pypot/scikit-learn,0asa/scikit-learn,IndraVikas/scikit-learn,shyamalschandra/scikit-learn,vinayak-mehta/scikit-learn,0x0all/scikit-learn,vinayak-mehta/scikit-learn,hitszxp/scikit-learn,dsquareindia/scikit-learn,rishikksh20/scikit-learn,ngoix/OCRF,phdowling/scikit-learn,Lawrence-Liu/scikit-learn,carrillo/scikit-learn,pratapvardhan/scikit-learn,zorroblue/scikit-learn,mblondel/scikit-learn,fengzhyuan/scikit-learn,AIML/scikit-learn,ycaihua/scikit-learn,kagayakidan/scikit-learn,sergeyf/scikit-learn,nmayorov/scikit-learn,kashif/scikit-learn,OshynSong/scikit-learn,samuel1208/scikit-learn,ChanderG/scikit-learn,scikit-learn/scikit-learn,fredhusser/scikit-learn,JPFrancoia/scikit-learn,jmschrei/scikit-learn,schets/scikit-learn,trankmichael/scikit-learn,fyffyt/scikit-learn,henrykironde/scikit-learn,shikhardb/scikit-learn,petosegan/scikit-learn,voxlol/scikit-learn,Myasuka/scikit-learn,nomadcube/scikit-learn,kjung/scikit-learn,scikit-learn/scikit-learn,mehdidc/scikit-learn,RachitKansal/scikit-learn,xubenben/scikit-learn,larsmans/scikit-learn,bikong2/scikit-learn,smartscheduling/scikit-learn-categorical-tree,liberatorqjw/scikit-learn,pratapvardhan/scikit-learn,kevin-intel/scikit-learn,rvraghav93/scikit-learn,shyamalschandra/scikit-learn,JosmanPS/scikit-learn,jjx02230808/project0223,466152112/scikit-learn,nrhine1/scikit-learn,fabianp/scikit-learn,0x0all/scikit-learn,LiaoPan/scikit-learn,RPGOne/scikit-learn,luo66/scikit-learn,ClimbsRocks/scikit-learn,harshaneelhg/scikit-learn,madjelan/scikit-learn,procoder317/scikit-learn,eickenberg/scikit-learn,MartinSavc/scikit-learn,pythonvietnam/scikit-learn,liangz0707/scikit-learn,ishanic/scikit-learn,xubenben/scikit-learn,hsiaoyi0504/scikit-learn,alexeyum/scikit-learn,liangz0707/scikit-learn,dsullivan7/scikit-learn,joernhees/scikit-learn,wanggang3333/scikit-learn,jpautom/scikit-learn,JPFrancoia/scikit-learn,aewhatley/scikit-learn,anirudhjayaraman/scikit-learn,mfjb/scikit-learn,beepee14/scikit-learn,vermouthmjl/scikit-learn,aminert/scikit-learn,Achuth17/scikit-learn,terkkila/scikit-learn,fabioticconi/scikit-learn,Lawrence-Liu/scikit-learn,mattgiguere/scikit-learn,mayblue9/scikit-learn,yunfeilu/scikit-learn,sinhrks/scikit-learn,jmetzen/scikit-learn,arahuja/scikit-learn,luo66/scikit-learn,herilalaina/scikit-learn,davidgbe/scikit-learn,loli/sklearn-ensembletrees,jereze/scikit-learn,yask123/scikit-learn,jblackburne/scikit-learn,hugobowne/scikit-learn,icdishb/scikit-learn,RPGOne/scikit-learn,DonBeo/scikit-learn,shikhardb/scikit-learn,jzt5132/scikit-learn,evgchz/scikit-learn,jorge2703/scikit-learn,deepesch/scikit-learn,hugobowne/scikit-learn,vivekmishra1991/scikit-learn,jaidevd/scikit-learn,PrashntS/scikit-learn,jakobworldpeace/scikit-learn,thilbern/scikit-learn,jereze/scikit-learn,fengzhyuan/scikit-learn,olologin/scikit-learn,macks22/scikit-learn,liyu1990/sklearn,sonnyhu/sci
kit-learn,jakirkham/scikit-learn,hainm/scikit-learn,Vimos/scikit-learn,NelisVerhoef/scikit-learn,robin-lai/scikit-learn,RayMick/scikit-learn,beepee14/scikit-learn,cl4rke/scikit-learn,ephes/scikit-learn,samzhang111/scikit-learn,DSLituiev/scikit-learn,rahul-c1/scikit-learn,jjx02230808/project0223,nomadcube/scikit-learn,mlyundin/scikit-learn,pv/scikit-learn,adamgreenhall/scikit-learn,wazeerzulfikar/scikit-learn,Srisai85/scikit-learn,Nyker510/scikit-learn,mhdella/scikit-learn,andrewnc/scikit-learn,themrmax/scikit-learn,treycausey/scikit-learn,gclenaghan/scikit-learn,cybernet14/scikit-learn,manhhomienbienthuy/scikit-learn,eickenberg/scikit-learn,procoder317/scikit-learn,shyamalschandra/scikit-learn,ephes/scikit-learn,YinongLong/scikit-learn,UNR-AERIAL/scikit-learn,stylianos-kampakis/scikit-learn,abimannans/scikit-learn,ChanChiChoi/scikit-learn,rahuldhote/scikit-learn,liangz0707/scikit-learn,spallavolu/scikit-learn,voxlol/scikit-learn,IssamLaradji/scikit-learn,yask123/scikit-learn,roxyboy/scikit-learn,toastedcornflakes/scikit-learn,elkingtonmcb/scikit-learn,ldirer/scikit-learn,eickenberg/scikit-learn,anurag313/scikit-learn,pythonvietnam/scikit-learn,andaag/scikit-learn,cainiaocome/scikit-learn,xyguo/scikit-learn,olologin/scikit-learn,huzq/scikit-learn,ilyes14/scikit-learn,joshloyal/scikit-learn,sumspr/scikit-learn,r-mart/scikit-learn,anntzer/scikit-learn,rahul-c1/scikit-learn,arahuja/scikit-learn,vybstat/scikit-learn,tosolveit/scikit-learn,cainiaocome/scikit-learn,tawsifkhan/scikit-learn,nikitasingh981/scikit-learn,elkingtonmcb/scikit-learn,hdmetor/scikit-learn,simon-pepin/scikit-learn,loli/semisupervisedforests,ky822/scikit-learn,marcocaccin/scikit-learn,walterreade/scikit-learn,cwu2011/scikit-learn,sumspr/scikit-learn,thientu/scikit-learn,djgagne/scikit-learn,dsullivan7/scikit-learn,Vimos/scikit-learn,MohammedWasim/scikit-learn,JeanKossaifi/scikit-learn,AlexRobson/scikit-learn,PatrickOReilly/scikit-learn,Srisai85/scikit-learn,h2educ/scikit-learn,qifeigit/scikit-learn,chrisburr/scikit-learn,mlyundin/scikit-learn,idlead/scikit-learn,quheng/scikit-learn,ldirer/scikit-learn,ankurankan/scikit-learn,MatthieuBizien/scikit-learn,shahankhatch/scikit-learn,joshloyal/scikit-learn,jayflo/scikit-learn,PatrickOReilly/scikit-learn,hitszxp/scikit-learn,belltailjp/scikit-learn,aflaxman/scikit-learn,466152112/scikit-learn,TomDLT/scikit-learn,lbishal/scikit-learn,clemkoa/scikit-learn,jorik041/scikit-learn,HolgerPeters/scikit-learn,Jimmy-Morzaria/scikit-learn,hsuantien/scikit-learn,tomlof/scikit-learn,jm-begon/scikit-learn,krez13/scikit-learn,arjoly/scikit-learn,zuku1985/scikit-learn,zaxtax/scikit-learn,alexeyum/scikit-learn,hsuantien/scikit-learn,r-mart/scikit-learn,jlegendary/scikit-learn,h2educ/scikit-learn,cl4rke/scikit-learn,glouppe/scikit-learn,nmayorov/scikit-learn,raghavrv/scikit-learn,iismd17/scikit-learn,theoryno3/scikit-learn,cybernet14/scikit-learn,zorroblue/scikit-learn,PatrickOReilly/scikit-learn,ClimbsRocks/scikit-learn,Fireblend/scikit-learn,AIML/scikit-learn,CforED/Machine-Learning,simon-pepin/scikit-learn,heli522/scikit-learn,macks22/scikit-learn,liyu1990/sklearn,massmutual/scikit-learn,manashmndl/scikit-learn,robbymeals/scikit-learn,NunoEdgarGub1/scikit-learn,Myasuka/scikit-learn,ky822/scikit-learn,yanlend/scikit-learn,cwu2011/scikit-learn,zuku1985/scikit-learn,CVML/scikit-learn,khkaminska/scikit-learn,mojoboss/scikit-learn,mayblue9/scikit-learn,zuku1985/scikit-learn,massmutual/scikit-learn,pv/scikit-learn,maheshakya/scikit-learn,bigdataelephants/scikit-learn,etkirsch/scikit-learn,wanggang3333/s
cikit-learn,evgchz/scikit-learn,jereze/scikit-learn,jblackburne/scikit-learn,lbishal/scikit-learn,mxjl620/scikit-learn,shenzebang/scikit-learn,mugizico/scikit-learn,roxyboy/scikit-learn,justincassidy/scikit-learn,pythonvietnam/scikit-learn,ankurankan/scikit-learn,MartinDelzant/scikit-learn,ZENGXH/scikit-learn,tawsifkhan/scikit-learn,lin-credible/scikit-learn,hitszxp/scikit-learn,billy-inn/scikit-learn,deepesch/scikit-learn,jaidevd/scikit-learn,murali-munna/scikit-learn,vivekmishra1991/scikit-learn,shahankhatch/scikit-learn,DSLituiev/scikit-learn,glemaitre/scikit-learn,JeanKossaifi/scikit-learn,qifeigit/scikit-learn,jjx02230808/project0223,huobaowangxi/scikit-learn,devanshdalal/scikit-learn,wanggang3333/scikit-learn,BiaDarkia/scikit-learn,vigilv/scikit-learn,shangwuhencc/scikit-learn,BiaDarkia/scikit-learn,ycaihua/scikit-learn,rexshihaoren/scikit-learn,vortex-ape/scikit-learn,pompiduskus/scikit-learn,PatrickChrist/scikit-learn,ankurankan/scikit-learn,rahuldhote/scikit-learn,ningchi/scikit-learn,OshynSong/scikit-learn,mwv/scikit-learn,raghavrv/scikit-learn,ahoyosid/scikit-learn,abhishekkrthakur/scikit-learn,yonglehou/scikit-learn,ChanderG/scikit-learn,IshankGulati/scikit-learn,ankurankan/scikit-learn,loli/sklearn-ensembletrees,CVML/scikit-learn,theoryno3/scikit-learn,meduz/scikit-learn,rahul-c1/scikit-learn,ndingwall/scikit-learn,arahuja/scikit-learn,alexeyum/scikit-learn,r-mart/scikit-learn,clemkoa/scikit-learn,ishanic/scikit-learn,imaculate/scikit-learn,MechCoder/scikit-learn,DonBeo/scikit-learn,cainiaocome/scikit-learn,shangwuhencc/scikit-learn,cauchycui/scikit-learn,nelson-liu/scikit-learn,kylerbrown/scikit-learn,jblackburne/scikit-learn,TomDLT/scikit-learn,wazeerzulfikar/scikit-learn,dingocuster/scikit-learn,abhishekgahlot/scikit-learn,lazywei/scikit-learn,kylerbrown/scikit-learn,macks22/scikit-learn,ivannz/scikit-learn,nvoron23/scikit-learn,AnasGhrab/scikit-learn,arjoly/scikit-learn,nvoron23/scikit-learn,amueller/scikit-learn,ky822/scikit-learn,aflaxman/scikit-learn,gclenaghan/scikit-learn,trankmichael/scikit-learn,Srisai85/scikit-learn,zaxtax/scikit-learn,aflaxman/scikit-learn,jpautom/scikit-learn,altairpearl/scikit-learn,russel1237/scikit-learn,poryfly/scikit-learn,amueller/scikit-learn,smartscheduling/scikit-learn-categorical-tree,altairpearl/scikit-learn,xubenben/scikit-learn,AnasGhrab/scikit-learn,glennq/scikit-learn,aabadie/scikit-learn,ngoix/OCRF,hainm/scikit-learn,ClimbsRocks/scikit-learn,jkarnows/scikit-learn,jseabold/scikit-learn,hsiaoyi0504/scikit-learn,idlead/scikit-learn,saiwing-yeung/scikit-learn,LiaoPan/scikit-learn,madjelan/scikit-learn,untom/scikit-learn,djgagne/scikit-learn,IssamLaradji/scikit-learn,ningchi/scikit-learn,amueller/scikit-learn,ssaeger/scikit-learn,nesterione/scikit-learn,tdhopper/scikit-learn,vybstat/scikit-learn,herilalaina/scikit-learn,LohithBlaze/scikit-learn,plissonf/scikit-learn,maheshakya/scikit-learn,mwv/scikit-learn,yunfeilu/scikit-learn,hdmetor/scikit-learn,Windy-Ground/scikit-learn,q1ang/scikit-learn,RayMick/scikit-learn,zorojean/scikit-learn,henridwyer/scikit-learn,fabianp/scikit-learn,ashhher3/scikit-learn,nesterione/scikit-learn,fyffyt/scikit-learn,themrmax/scikit-learn,frank-tancf/scikit-learn,trankmichael/scikit-learn,samuel1208/scikit-learn,siutanwong/scikit-learn,robin-lai/scikit-learn,harshaneelhg/scikit-learn,AnasGhrab/scikit-learn,ngoix/OCRF,jereze/scikit-learn,hlin117/scikit-learn,Titan-C/scikit-learn,walterreade/scikit-learn,xzh86/scikit-learn,abimannans/scikit-learn,robbymeals/scikit-learn,rajat1994/scikit-learn,jjx02230808/project
0223,beepee14/scikit-learn,larsmans/scikit-learn,xiaoxiamii/scikit-learn,vivekmishra1991/scikit-learn,ogrisel/scikit-learn,tosolveit/scikit-learn,OshynSong/scikit-learn,ahoyosid/scikit-learn,jakobworldpeace/scikit-learn,ilyes14/scikit-learn,massmutual/scikit-learn,ChanderG/scikit-learn,nhejazi/scikit-learn,kagayakidan/scikit-learn,LiaoPan/scikit-learn,hugobowne/scikit-learn,tmhm/scikit-learn,moutai/scikit-learn,zorroblue/scikit-learn,ashhher3/scikit-learn,nhejazi/scikit-learn,beepee14/scikit-learn,chrisburr/scikit-learn,Clyde-fare/scikit-learn,rrohan/scikit-learn,treycausey/scikit-learn,joernhees/scikit-learn,aminert/scikit-learn,stylianos-kampakis/scikit-learn,larsmans/scikit-learn,wlamond/scikit-learn,bnaul/scikit-learn,anirudhjayaraman/scikit-learn,xzh86/scikit-learn,betatim/scikit-learn,shenzebang/scikit-learn,RomainBrault/scikit-learn,BiaDarkia/scikit-learn,Barmaley-exe/scikit-learn,BiaDarkia/scikit-learn,theoryno3/scikit-learn,RachitKansal/scikit-learn,mwv/scikit-learn,fengzhyuan/scikit-learn,fbagirov/scikit-learn,murali-munna/scikit-learn,alexsavio/scikit-learn,LohithBlaze/scikit-learn,rahul-c1/scikit-learn,terkkila/scikit-learn,Obus/scikit-learn,pypot/scikit-learn,eg-zhang/scikit-learn,f3r/scikit-learn,CVML/scikit-learn,hsuantien/scikit-learn,roxyboy/scikit-learn,f3r/scikit-learn,equialgo/scikit-learn,fzalkow/scikit-learn,LohithBlaze/scikit-learn,ChanderG/scikit-learn,ivannz/scikit-learn,fzalkow/scikit-learn,jmetzen/scikit-learn,waterponey/scikit-learn,luo66/scikit-learn,sergeyf/scikit-learn,pythonvietnam/scikit-learn,zorojean/scikit-learn,ephes/scikit-learn,yyjiang/scikit-learn,mikebenfield/scikit-learn,MartinSavc/scikit-learn,kashif/scikit-learn,Fireblend/scikit-learn,pypot/scikit-learn,olologin/scikit-learn,arahuja/scikit-learn,glouppe/scikit-learn,shenzebang/scikit-learn,Adai0808/scikit-learn,ningchi/scikit-learn,aetilley/scikit-learn,poryfly/scikit-learn,thilbern/scikit-learn,yyjiang/scikit-learn,themrmax/scikit-learn,wlamond/scikit-learn,cl4rke/scikit-learn,Adai0808/scikit-learn,mjudsp/Tsallis,chrisburr/scikit-learn,xwolf12/scikit-learn,bnaul/scikit-learn,IssamLaradji/scikit-learn,saiwing-yeung/scikit-learn,equialgo/scikit-learn,ltiao/scikit-learn,shenzebang/scikit-learn,fyffyt/scikit-learn,rrohan/scikit-learn,liberatorqjw/scikit-learn,wlamond/scikit-learn,mayblue9/scikit-learn,yonglehou/scikit-learn,btabibian/scikit-learn,mhdella/scikit-learn,hdmetor/scikit-learn,clemkoa/scikit-learn,bnaul/scikit-learn,rohanp/scikit-learn,nelson-liu/scikit-learn,zaxtax/scikit-learn,mwv/scikit-learn,mxjl620/scikit-learn,madjelan/scikit-learn,jkarnows/scikit-learn,dsquareindia/scikit-learn,jlegendary/scikit-learn,Jimmy-Morzaria/scikit-learn,lin-credible/scikit-learn,q1ang/scikit-learn,etkirsch/scikit-learn,belltailjp/scikit-learn,shahankhatch/scikit-learn,tawsifkhan/scikit-learn,mattilyra/scikit-learn,Vimos/scikit-learn,icdishb/scikit-learn,Barmaley-exe/scikit-learn,JsNoNo/scikit-learn,clemkoa/scikit-learn,ChanChiChoi/scikit-learn,rahuldhote/scikit-learn,loli/semisupervisedforests,imaculate/scikit-learn,rvraghav93/scikit-learn,michigraber/scikit-learn,pkruskal/scikit-learn,appapantula/scikit-learn,arabenjamin/scikit-learn,liyu1990/sklearn,Lawrence-Liu/scikit-learn,Obus/scikit-learn,elkingtonmcb/scikit-learn,espg/scikit-learn,arabenjamin/scikit-learn,xavierwu/scikit-learn,wzbozon/scikit-learn,Djabbz/scikit-learn,henrykironde/scikit-learn,IssamLaradji/scikit-learn,btabibian/scikit-learn,henridwyer/scikit-learn,jayflo/scikit-learn,mhdella/scikit-learn,alvarofierroclavero/scikit-learn,chrisburr/sci
kit-learn,mattgiguere/scikit-learn,Akshay0724/scikit-learn,mfjb/scikit-learn,fbagirov/scikit-learn,vortex-ape/scikit-learn,terkkila/scikit-learn,robbymeals/scikit-learn,pnedunuri/scikit-learn,abimannans/scikit-learn,harshaneelhg/scikit-learn,YinongLong/scikit-learn,Garrett-R/scikit-learn,ishanic/scikit-learn,billy-inn/scikit-learn,manashmndl/scikit-learn,yonglehou/scikit-learn,PatrickChrist/scikit-learn,frank-tancf/scikit-learn,Achuth17/scikit-learn,anurag313/scikit-learn,lazywei/scikit-learn,ZENGXH/scikit-learn,glouppe/scikit-learn,scikit-learn/scikit-learn,espg/scikit-learn,manashmndl/scikit-learn,mjgrav2001/scikit-learn,tosolveit/scikit-learn,etkirsch/scikit-learn,zhenv5/scikit-learn,scikit-learn/scikit-learn,AlexanderFabisch/scikit-learn,murali-munna/scikit-learn,dhruv13J/scikit-learn,iismd17/scikit-learn,CforED/Machine-Learning,shusenl/scikit-learn,dhruv13J/scikit-learn,petosegan/scikit-learn,andaag/scikit-learn,jorik041/scikit-learn,ilo10/scikit-learn,lesteve/scikit-learn,michigraber/scikit-learn,rvraghav93/scikit-learn,ningchi/scikit-learn,Obus/scikit-learn,ZenDevelopmentSystems/scikit-learn,pompiduskus/scikit-learn,betatim/scikit-learn,henrykironde/scikit-learn,alvarofierroclavero/scikit-learn,ndingwall/scikit-learn,stylianos-kampakis/scikit-learn,mjudsp/Tsallis,trungnt13/scikit-learn,zihua/scikit-learn,sumspr/scikit-learn,fabioticconi/scikit-learn,vshtanko/scikit-learn,abhishekgahlot/scikit-learn,Windy-Ground/scikit-learn,mikebenfield/scikit-learn,Djabbz/scikit-learn,vortex-ape/scikit-learn,fengzhyuan/scikit-learn,schets/scikit-learn,aetilley/scikit-learn,mugizico/scikit-learn,vigilv/scikit-learn,mblondel/scikit-learn,giorgiop/scikit-learn,ChanChiChoi/scikit-learn,rexshihaoren/scikit-learn,Sentient07/scikit-learn,equialgo/scikit-learn,carrillo/scikit-learn,MatthieuBizien/scikit-learn,jayflo/scikit-learn,AlexanderFabisch/scikit-learn,hlin117/scikit-learn,rajat1994/scikit-learn,poryfly/scikit-learn,Fireblend/scikit-learn,zihua/scikit-learn,NunoEdgarGub1/scikit-learn,abimannans/scikit-learn,h2educ/scikit-learn,Sentient07/scikit-learn,russel1237/scikit-learn,iismd17/scikit-learn,yanlend/scikit-learn,jseabold/scikit-learn,cwu2011/scikit-learn,arabenjamin/scikit-learn,ilo10/scikit-learn,deepesch/scikit-learn,jmetzen/scikit-learn,MohammedWasim/scikit-learn,jkarnows/scikit-learn,ilo10/scikit-learn,mugizico/scikit-learn,bthirion/scikit-learn,simon-pepin/scikit-learn,MartinDelzant/scikit-learn,huzq/scikit-learn,JsNoNo/scikit-learn,xwolf12/scikit-learn,PatrickOReilly/scikit-learn,UNR-AERIAL/scikit-learn,HolgerPeters/scikit-learn,MechCoder/scikit-learn,ogrisel/scikit-learn,hitszxp/scikit-learn,meduz/scikit-learn,ycaihua/scikit-learn,eg-zhang/scikit-learn,q1ang/scikit-learn,zhenv5/scikit-learn,kashif/scikit-learn,zorroblue/scikit-learn,evgchz/scikit-learn,AnasGhrab/scikit-learn,ltiao/scikit-learn,kaichogami/scikit-learn,Aasmi/scikit-learn,siutanwong/scikit-learn,ky822/scikit-learn,nvoron23/scikit-learn,RachitKansal/scikit-learn,gotomypc/scikit-learn,jm-begon/scikit-learn,alexsavio/scikit-learn,chrsrds/scikit-learn,0x0all/scikit-learn,mikebenfield/scikit-learn,manashmndl/scikit-learn,hsiaoyi0504/scikit-learn,RPGOne/scikit-learn,nrhine1/scikit-learn,NunoEdgarGub1/scikit-learn,chrsrds/scikit-learn,heli522/scikit-learn,jaidevd/scikit-learn,henrykironde/scikit-learn,pnedunuri/scikit-learn,pnedunuri/scikit-learn,rexshihaoren/scikit-learn,siutanwong/scikit-learn,sergeyf/scikit-learn,evgchz/scikit-learn,mjgrav2001/scikit-learn,giorgiop/scikit-learn,vshtanko/scikit-learn,ZenDevelopmentSystems/scikit-lea
rn,joshloyal/scikit-learn,Titan-C/scikit-learn,appapantula/scikit-learn,andaag/scikit-learn,lesteve/scikit-learn,samuel1208/scikit-learn,alexeyum/scikit-learn,huzq/scikit-learn,frank-tancf/scikit-learn,Jimmy-Morzaria/scikit-learn,untom/scikit-learn,lazywei/scikit-learn,nhejazi/scikit-learn,ilyes14/scikit-learn,vinayak-mehta/scikit-learn,schets/scikit-learn,quheng/scikit-learn,xuewei4d/scikit-learn,sarahgrogan/scikit-learn,thilbern/scikit-learn,manhhomienbienthuy/scikit-learn,mfjb/scikit-learn,mjudsp/Tsallis,dhruv13J/scikit-learn,Aasmi/scikit-learn,xzh86/scikit-learn,jm-begon/scikit-learn,victorbergelin/scikit-learn,hlin117/scikit-learn,costypetrisor/scikit-learn,bikong2/scikit-learn,justincassidy/scikit-learn,vybstat/scikit-learn,gclenaghan/scikit-learn,fabioticconi/scikit-learn,MatthieuBizien/scikit-learn,marcocaccin/scikit-learn,IndraVikas/scikit-learn,imaculate/scikit-learn,madjelan/scikit-learn,jmschrei/scikit-learn,ankurankan/scikit-learn,raghavrv/scikit-learn,mjgrav2001/scikit-learn,icdishb/scikit-learn,466152112/scikit-learn,rrohan/scikit-learn,toastedcornflakes/scikit-learn,dsullivan7/scikit-learn,hrjn/scikit-learn,loli/sklearn-ensembletrees,nikitasingh981/scikit-learn,kevin-intel/scikit-learn,hitszxp/scikit-learn,jmschrei/scikit-learn,jzt5132/scikit-learn,xwolf12/scikit-learn,lin-credible/scikit-learn,davidgbe/scikit-learn,samuel1208/scikit-learn,davidgbe/scikit-learn,arjoly/scikit-learn,ssaeger/scikit-learn,gclenaghan/scikit-learn,jseabold/scikit-learn,nikitasingh981/scikit-learn,ndingwall/scikit-learn,themrmax/scikit-learn,MechCoder/scikit-learn,PrashntS/scikit-learn,JeanKossaifi/scikit-learn,cwu2011/scikit-learn,HolgerPeters/scikit-learn,vermouthmjl/scikit-learn,pv/scikit-learn,466152112/scikit-learn,chrsrds/scikit-learn,rishikksh20/scikit-learn,mxjl620/scikit-learn,theoryno3/scikit-learn,kylerbrown/scikit-learn,ishanic/scikit-learn,0x0all/scikit-learn,mattilyra/scikit-learn,adamgreenhall/scikit-learn,kaichogami/scikit-learn,saiwing-yeung/scikit-learn,treycausey/scikit-learn,shahankhatch/scikit-learn,Clyde-fare/scikit-learn,vigilv/scikit-learn,waterponey/scikit-learn,fabianp/scikit-learn,IndraVikas/scikit-learn,jakirkham/scikit-learn,Nyker510/scikit-learn,0asa/scikit-learn,ldirer/scikit-learn,RomainBrault/scikit-learn,jaidevd/scikit-learn,lbishal/scikit-learn,eickenberg/scikit-learn,jakirkham/scikit-learn,xyguo/scikit-learn,maheshakya/scikit-learn,RomainBrault/scikit-learn,vermouthmjl/scikit-learn,dhruv13J/scikit-learn,phdowling/scikit-learn,kjung/scikit-learn,russel1237/scikit-learn,jzt5132/scikit-learn,Clyde-fare/scikit-learn,spallavolu/scikit-learn,tdhopper/scikit-learn,devanshdalal/scikit-learn,jpautom/scikit-learn,hlin117/scikit-learn,manhhomienbienthuy/scikit-learn,akionakamura/scikit-learn,kevin-intel/scikit-learn,lenovor/scikit-learn,f3r/scikit-learn,Achuth17/scikit-learn,3manuek/scikit-learn,akionakamura/scikit-learn,akionakamura/scikit-learn,jayflo/scikit-learn,poryfly/scikit-learn,xuewei4d/scikit-learn,carrillo/scikit-learn,shusenl/scikit-learn,0asa/scikit-learn,jpautom/scikit-learn,vshtanko/scikit-learn,ssaeger/scikit-learn,zorojean/scikit-learn,vibhorag/scikit-learn,joshloyal/scikit-learn,gotomypc/scikit-learn,mhue/scikit-learn,kagayakidan/scikit-learn,aewhatley/scikit-learn,sinhrks/scikit-learn,mhdella/scikit-learn,wanggang3333/scikit-learn,smartscheduling/scikit-learn-categorical-tree,NelisVerhoef/scikit-learn,olologin/scikit-learn,nmayorov/scikit-learn,billy-inn/scikit-learn,JosmanPS/scikit-learn,cl4rke/scikit-learn,ndingwall/scikit-learn,liangz0707/scikit-learn,
justincassidy/scikit-learn,CforED/Machine-Learning,quheng/scikit-learn,AlexRobson/scikit-learn,icdishb/scikit-learn,ltiao/scikit-learn,zuku1985/scikit-learn,IshankGulati/scikit-learn,nelson-liu/scikit-learn,mlyundin/scikit-learn,mhue/scikit-learn,RayMick/scikit-learn,shusenl/scikit-learn,xyguo/scikit-learn,zorojean/scikit-learn,DonBeo/scikit-learn,JsNoNo/scikit-learn,jorge2703/scikit-learn,tmhm/scikit-learn,moutai/scikit-learn,voxlol/scikit-learn,eg-zhang/scikit-learn,victorbergelin/scikit-learn,thilbern/scikit-learn,rishikksh20/scikit-learn,MartinDelzant/scikit-learn,lenovor/scikit-learn,jblackburne/scikit-learn,nrhine1/scikit-learn,Clyde-fare/scikit-learn,IshankGulati/scikit-learn,NelisVerhoef/scikit-learn,LiaoPan/scikit-learn,NunoEdgarGub1/scikit-learn,cauchycui/scikit-learn,nesterione/scikit-learn,PrashntS/scikit-learn,lbishal/scikit-learn,yunfeilu/scikit-learn,sarahgrogan/scikit-learn,terkkila/scikit-learn
|
Add implementation of RANSAC algorithm
|
import numpy as np
def ransac(X, y, estimator_cls, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=1, estimator_kwargs={}):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
"""
best_estimator = None
best_n_inliers = 0
best_score = np.inf
best_inlier_mask = None
best_inlier_X = None
best_inlier_y = None
# estimator used for all iterations and for output
estimator = estimator_cls(**estimator_kwargs)
# number of data samples
n_samples = X.shape[0]
for _ in range(max_trials):
# choose random sample set
random_idxs = np.random.randint(0, n_samples, min_samples)
rsample_X = X[random_idxs]
rsample_y = y[random_idxs]
# check if random sample set is valid
if is_data_valid is not None and not is_data_valid(X, y):
continue
# fit model for current random sample set
estimator.fit(rsample_X, rsample_y)
# check if estimated model is valid
if is_model_valid is not None and not is_model_valid(estimator,
rsample_X,
rsample_y):
continue
# residuals of all data for current random sample model
        rsample_residuals = np.abs(estimator.predict(X) - y)
# classify data into inliers and outliers
rsample_inlier_mask = rsample_residuals < residual_threshold
rsample_n_inliers = np.sum(rsample_inlier_mask)
# less inliers -> skip current random sample
if rsample_n_inliers < best_n_inliers:
continue
# extract inlier data set
rsample_inlier_X = X[rsample_inlier_mask]
rsample_inlier_y = y[rsample_inlier_mask]
# score of inlier data set
rsample_score = estimator.score(rsample_inlier_X, rsample_inlier_y)
# same number of inliers but worse score -> skip current random sample
if rsample_n_inliers == best_n_inliers and rsample_score < best_score:
continue
# save current random sample as best sample
best_n_inliers = rsample_n_inliers
best_score = rsample_score
best_inlier_mask = rsample_inlier_mask
best_inlier_X = rsample_inlier_X
best_inlier_y = rsample_inlier_y
# break if sufficient number of inliers or score is reached
if best_n_inliers >= stop_n_inliers or best_score >= stop_score:
break
# if none of the iterations met the required criteria
if best_inlier_mask is None:
return None, None
# estimate final model using all inliers
estimator.fit(best_inlier_X, best_inlier_y)
return estimator, best_inlier_mask
|
<commit_before><commit_msg>Add implementation of RANSAC algorithm<commit_after>
|
import numpy as np
def ransac(X, y, estimator_cls, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=1, estimator_kwargs={}):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
"""
best_estimator = None
best_n_inliers = 0
best_score = np.inf
best_inlier_mask = None
best_inlier_X = None
best_inlier_y = None
# estimator used for all iterations and for output
estimator = estimator_cls(**estimator_kwargs)
# number of data samples
n_samples = X.shape[0]
for _ in range(max_trials):
# choose random sample set
random_idxs = np.random.randint(0, n_samples, min_samples)
rsample_X = X[random_idxs]
rsample_y = y[random_idxs]
# check if random sample set is valid
if is_data_valid is not None and not is_data_valid(X, y):
continue
# fit model for current random sample set
estimator.fit(rsample_X, rsample_y)
# check if estimated model is valid
if is_model_valid is not None and not is_model_valid(estimator,
rsample_X,
rsample_y):
continue
# residuals of all data for current random sample model
        rsample_residuals = np.abs(estimator.predict(X) - y)
# classify data into inliers and outliers
rsample_inlier_mask = rsample_residuals < residual_threshold
rsample_n_inliers = np.sum(rsample_inlier_mask)
# less inliers -> skip current random sample
if rsample_n_inliers < best_n_inliers:
continue
# extract inlier data set
rsample_inlier_X = X[rsample_inlier_mask]
rsample_inlier_y = y[rsample_inlier_mask]
# score of inlier data set
rsample_score = estimator.score(rsample_inlier_X, rsample_inlier_y)
# same number of inliers but worse score -> skip current random sample
if rsample_n_inliers == best_n_inliers and rsample_score < best_score:
continue
# save current random sample as best sample
best_n_inliers = rsample_n_inliers
best_score = rsample_score
best_inlier_mask = rsample_inlier_mask
best_inlier_X = rsample_inlier_X
best_inlier_y = rsample_inlier_y
# break if sufficient number of inliers or score is reached
if best_n_inliers >= stop_n_inliers or best_score >= stop_score:
break
# if none of the iterations met the required criteria
if best_inlier_mask is None:
return None, None
# estimate final model using all inliers
estimator.fit(best_inlier_X, best_inlier_y)
return estimator, best_inlier_mask
|
Add implementation of RANSAC algorithmimport numpy as np
def ransac(X, y, estimator_cls, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=1, estimator_kwargs={}):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
"""
best_estimator = None
best_n_inliers = 0
best_score = np.inf
best_inlier_mask = None
best_inlier_X = None
best_inlier_y = None
# estimator used for all iterations and for output
estimator = estimator_cls(**estimator_kwargs)
# number of data samples
n_samples = X.shape[0]
for _ in range(max_trials):
# choose random sample set
random_idxs = np.random.randint(0, n_samples, min_samples)
rsample_X = X[random_idxs]
rsample_y = y[random_idxs]
# check if random sample set is valid
if is_data_valid is not None and not is_data_valid(X, y):
continue
# fit model for current random sample set
estimator.fit(rsample_X, rsample_y)
# check if estimated model is valid
if is_model_valid is not None and not is_model_valid(estimator,
rsample_X,
rsample_y):
continue
# residuals of all data for current random sample model
        rsample_residuals = np.abs(estimator.predict(X) - y)
# classify data into inliers and outliers
rsample_inlier_mask = rsample_residuals < residual_threshold
rsample_n_inliers = np.sum(rsample_inlier_mask)
# less inliers -> skip current random sample
if rsample_n_inliers < best_n_inliers:
continue
# extract inlier data set
rsample_inlier_X = X[rsample_inlier_mask]
rsample_inlier_y = y[rsample_inlier_mask]
# score of inlier data set
rsample_score = estimator.score(rsample_inlier_X, rsample_inlier_y)
# same number of inliers but worse score -> skip current random sample
if rsample_n_inliers == best_n_inliers and rsample_score < best_score:
continue
# save current random sample as best sample
best_n_inliers = rsample_n_inliers
best_score = rsample_score
best_inlier_mask = rsample_inlier_mask
best_inlier_X = rsample_inlier_X
best_inlier_y = rsample_inlier_y
# break if sufficient number of inliers or score is reached
if best_n_inliers >= stop_n_inliers or best_score >= stop_score:
break
# if none of the iterations met the required criteria
if best_inlier_mask is None:
return None, None
# estimate final model using all inliers
estimator.fit(best_inlier_X, best_inlier_y)
return estimator, best_inlier_mask
|
<commit_before><commit_msg>Add implementation of RANSAC algorithm<commit_after>import numpy as np
def ransac(X, y, estimator_cls, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=1, estimator_kwargs={}):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
"""
best_estimator = None
best_n_inliers = 0
best_score = np.inf
best_inlier_mask = None
best_inlier_X = None
best_inlier_y = None
# estimator used for all iterations and for output
estimator = estimator_cls(**estimator_kwargs)
# number of data samples
n_samples = X.shape[0]
for _ in range(max_trials):
# choose random sample set
random_idxs = np.random.randint(0, n_samples, min_samples)
rsample_X = X[random_idxs]
rsample_y = y[random_idxs]
# check if random sample set is valid
if is_data_valid is not None and not is_data_valid(X, y):
continue
# fit model for current random sample set
estimator.fit(rsample_X, rsample_y)
# check if estimated model is valid
if is_model_valid is not None and not is_model_valid(estimator,
rsample_X,
rsample_y):
continue
# residuals of all data for current random sample model
        rsample_residuals = np.abs(estimator.predict(X) - y)
# classify data into inliers and outliers
rsample_inlier_mask = rsample_residuals < residual_threshold
rsample_n_inliers = np.sum(rsample_inlier_mask)
# less inliers -> skip current random sample
if rsample_n_inliers < best_n_inliers:
continue
# extract inlier data set
rsample_inlier_X = X[rsample_inlier_mask]
rsample_inlier_y = y[rsample_inlier_mask]
# score of inlier data set
rsample_score = estimator.score(rsample_inlier_X, rsample_inlier_y)
# same number of inliers but worse score -> skip current random sample
if rsample_n_inliers == best_n_inliers and rsample_score < best_score:
continue
# save current random sample as best sample
best_n_inliers = rsample_n_inliers
best_score = rsample_score
best_inlier_mask = rsample_inlier_mask
best_inlier_X = rsample_inlier_X
best_inlier_y = rsample_inlier_y
# break if sufficient number of inliers or score is reached
if best_n_inliers >= stop_n_inliers or best_score >= stop_score:
break
# if none of the iterations met the required criteria
if best_inlier_mask is None:
return None, None
# estimate final model using all inliers
estimator.fit(best_inlier_X, best_inlier_y)
return estimator, best_inlier_mask
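
For orientation only, here is a minimal usage sketch of the ransac helper above; it is not part of the commit. It assumes the function is importable in the current scope and that scikit-learn's LinearRegression is installed; the synthetic data, the residual threshold and the trial count are invented for the example.

import numpy as np
from sklearn.linear_model import LinearRegression

# synthetic straight line with a few gross outliers mixed in
rng = np.random.RandomState(0)
X = np.arange(100, dtype=float).reshape(-1, 1)
y = 3.0 * X.ravel() + rng.normal(scale=0.5, size=100)
y[::10] += 50.0  # corrupt every tenth sample

# residual_threshold bounds the absolute prediction error an inlier may have
model, inlier_mask = ransac(X, y, LinearRegression,
                            min_samples=2, residual_threshold=2.5,
                            max_trials=200)
if model is not None:
    print(model.coef_, int(inlier_mask.sum()), "inliers")

The returned estimator is refitted on all inliers of the best trial, and inlier_mask marks which rows of X were kept.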
|
|
a038337824d88a01293711483390e67a2c969df0
|
catapult_base/__init__.py
|
catapult_base/__init__.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
|
Add catapult_base folder to tools/telemetry to make the refactor easier.
|
Add catapult_base folder to tools/telemetry to make the refactor easier.
This will make some of the refactoring more obvious and easy to review, as
well as making the needed refactoring after moving to the catapult repo easier.
BUG=473414
Review URL: https://codereview.chromium.org/1168263002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}
|
Python
|
bsd-3-clause
|
catapult-project/catapult,sahiljain/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,sahiljain/catapult,sahiljain/catapult,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,SummerLW/Perf-Insight-Report,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult
|
Add catapult_base folder to tools/telemetry to make the refactor easier.
This will make some of the refactoring more obvious and easy to review, as
well as making the needed refactoring after moving to the catapult repo easier.
BUG=473414
Review URL: https://codereview.chromium.org/1168263002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
|
<commit_before><commit_msg>Add catapult_base folder to tools/telemetry to make the refactor easier.
This will make some of the refactoring more obvious and easy to review, as
well as making the needed refactoring after moving to the catapult repo easier.
BUG=473414
Review URL: https://codereview.chromium.org/1168263002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}<commit_after>
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
|
Add catapult_base folder to tools/telemetry to make the refactor easier.
This will make some of the refactoring more obvious and easy to review, as
well as making the needed refactoring after moving to the catapult repo easier.
BUG=473414
Review URL: https://codereview.chromium.org/1168263002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
|
<commit_before><commit_msg>Add catapult_base folder to tools/telemetry to make the refactor easier.
This will make some of the refactoring more obvious and easy to review, as
well as making the needed refactoring after moving to the catapult repo easier.
BUG=473414
Review URL: https://codereview.chromium.org/1168263002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}<commit_after># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
|
|
a8c97f609f2477ddc59cd0ec0f1eb7e3604468f4
|
product_feed.py
|
product_feed.py
|
products = [
'AN4EF3',
'AE93CR',
'A9Q4MK',
'AC97HP',
'AK967J',
'AB2S93',
'AB2JT9',
'AN67HP',
'AK54VQ',
'AD53ZH']
|
Add products for product feed task
|
Add products for product feed task
|
Python
|
mit
|
coolshop-com/coolshop-application-assignment
|
Add products for product feed task
|
products = [
'AN4EF3',
'AE93CR',
'A9Q4MK',
'AC97HP',
'AK967J',
'AB2S93',
'AB2JT9',
'AN67HP',
'AK54VQ',
'AD53ZH']
|
<commit_before><commit_msg>Add products for product feed task<commit_after>
|
products = [
'AN4EF3',
'AE93CR',
'A9Q4MK',
'AC97HP',
'AK967J',
'AB2S93',
'AB2JT9',
'AN67HP',
'AK54VQ',
'AD53ZH']
|
Add products for product feed taskproducts = [
'AN4EF3',
'AE93CR',
'A9Q4MK',
'AC97HP',
'AK967J',
'AB2S93',
'AB2JT9',
'AN67HP',
'AK54VQ',
'AD53ZH']
|
<commit_before><commit_msg>Add products for product feed task<commit_after>products = [
'AN4EF3',
'AE93CR',
'A9Q4MK',
'AC97HP',
'AK967J',
'AB2S93',
'AB2JT9',
'AN67HP',
'AK54VQ',
'AD53ZH']
|
|
9ed2cbc57ca9cf370edc72cd2dc345a555e0ea96
|
CodeFights/findEmailDomain.py
|
CodeFights/findEmailDomain.py
|
#!/usr/local/bin/python
# Code Fights Find Email Domain Problem
def findEmailDomain(address):
index = max([i for i, char in enumerate(address) if char == "@"])
return address[index + 1:]
def main():
tests = [
["prettyandsimple@example.com", "example.com"],
["<>[]:,;@\"!#$%&*+-/=?^_{}| ~.a\"@example.org", "example.org"],
["someaddress@yandex.ru", "yandex.ru"],
["\" \"@xample.org", "xample.org"]
]
for t in tests:
res = findEmailDomain(t[0])
ans = t[1]
if ans == res:
print("PASSED: findEmailDomain({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: findEmailDomain({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights find email domain problem
|
Solve Code Fights find email domain problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights find email domain problem
|
#!/usr/local/bin/python
# Code Fights Find Email Domain Problem
def findEmailDomain(address):
index = max([i for i, char in enumerate(address) if char == "@"])
return address[index + 1:]
def main():
tests = [
["prettyandsimple@example.com", "example.com"],
["<>[]:,;@\"!#$%&*+-/=?^_{}| ~.a\"@example.org", "example.org"],
["someaddress@yandex.ru", "yandex.ru"],
["\" \"@xample.org", "xample.org"]
]
for t in tests:
res = findEmailDomain(t[0])
ans = t[1]
if ans == res:
print("PASSED: findEmailDomain({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: findEmailDomain({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights find email domain problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Find Email Domain Problem
def findEmailDomain(address):
index = max([i for i, char in enumerate(address) if char == "@"])
return address[index + 1:]
def main():
tests = [
["prettyandsimple@example.com", "example.com"],
["<>[]:,;@\"!#$%&*+-/=?^_{}| ~.a\"@example.org", "example.org"],
["someaddress@yandex.ru", "yandex.ru"],
["\" \"@xample.org", "xample.org"]
]
for t in tests:
res = findEmailDomain(t[0])
ans = t[1]
if ans == res:
print("PASSED: findEmailDomain({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: findEmailDomain({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights find email domain problem#!/usr/local/bin/python
# Code Fights Find Email Domain Problem
def findEmailDomain(address):
index = max([i for i, char in enumerate(address) if char == "@"])
return address[index + 1:]
def main():
tests = [
["prettyandsimple@example.com", "example.com"],
["<>[]:,;@\"!#$%&*+-/=?^_{}| ~.a\"@example.org", "example.org"],
["someaddress@yandex.ru", "yandex.ru"],
["\" \"@xample.org", "xample.org"]
]
for t in tests:
res = findEmailDomain(t[0])
ans = t[1]
if ans == res:
print("PASSED: findEmailDomain({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: findEmailDomain({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights find email domain problem<commit_after>#!/usr/local/bin/python
# Code Fights Find Email Domain Problem
def findEmailDomain(address):
index = max([i for i, char in enumerate(address) if char == "@"])
return address[index + 1:]
def main():
tests = [
["prettyandsimple@example.com", "example.com"],
["<>[]:,;@\"!#$%&*+-/=?^_{}| ~.a\"@example.org", "example.org"],
["someaddress@yandex.ru", "yandex.ru"],
["\" \"@xample.org", "xample.org"]
]
for t in tests:
res = findEmailDomain(t[0])
ans = t[1]
if ans == res:
print("PASSED: findEmailDomain({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: findEmailDomain({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
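
Side note, not part of the original solution: the same "take everything after the right-most @" behaviour can be sketched with str.rpartition, which avoids building the index list. Like the solution above, this assumes the address contains at least one "@".

def find_email_domain_alt(address):
    # rpartition splits on the right-most "@"; the last element is the domain
    return address.rpartition("@")[2]

assert find_email_domain_alt("prettyandsimple@example.com") == "example.com"
assert find_email_domain_alt('"very.unusual.@.unusual.com"@example.com') == "example.com"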
|
|
cc0323b0eacb881ff8c83d14192f7dbf1bcb9419
|
astrobin/management/commands/migrate_donors_to_premium.py
|
astrobin/management/commands/migrate_donors_to_premium.py
|
from django.core.management.base import BaseCommand
from datetime import date, timedelta
from paypal.standard.ipn import models
from subscription.models import Subscription, UserSubscription, Transaction
class Command(BaseCommand):
def __init__(self):
self.premium_subscription = Subscription.objects.get(name = 'AstroBin Premium')
def process_user(self, user, amount, first_payment):
price = 36.0
days_paid = int(float(amount)/price * 365.25)
expires = (first_payment + timedelta(days_paid)).date()
if expires > date.today():
us, created = UserSubscription.objects.get_or_create(
user = user,
subscription = self.premium_subscription,
expires = expires,
cancelled = False)
us.fix()
print "%.2f \t %d \t %s \t %s \t\t %s <%s>" % (
amount,
days_paid,
first_payment.strftime('%b, %d'),
expires.strftime('%b, %d %Y'),
user,
user.email)
def handle(self, *args, **options):
SUBSCRIPTION_NAMES = (
'AstroBin Donor',
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstorBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstorBin Donor Dinner Yearly',
)
"""
{
<user>: {
amount: 100,
first_payment: 1234567890.0
}
}
"""
data = dict()
for transaction in Transaction.objects.filter(
subscription__name__in = SUBSCRIPTION_NAMES,
event = "subscription payment",
timestamp__year = date.today().year).order_by('timestamp'):
if transaction.user not in data:
data[transaction.user] = {
"amount": transaction.amount,
"first_payment": transaction.timestamp
}
else:
data[transaction.user]["amount"] += transaction.amount
for user, values in data.iteritems():
amount = values["amount"]
first_payment = values["first_payment"]
self.process_user(user, amount, first_payment)
|
Add script to migrate donors to premium
|
Add script to migrate donors to premium
|
Python
|
agpl-3.0
|
astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin
|
Add script to migrate donors to premium
|
from django.core.management.base import BaseCommand
from datetime import date, timedelta
from paypal.standard.ipn import models
from subscription.models import Subscription, UserSubscription, Transaction
class Command(BaseCommand):
def __init__(self):
self.premium_subscription = Subscription.objects.get(name = 'AstroBin Premium')
def process_user(self, user, amount, first_payment):
price = 36.0
days_paid = int(float(amount)/price * 365.25)
expires = (first_payment + timedelta(days_paid)).date()
if expires > date.today():
us, created = UserSubscription.objects.get_or_create(
user = user,
subscription = self.premium_subscription,
expires = expires,
cancelled = False)
us.fix()
print "%.2f \t %d \t %s \t %s \t\t %s <%s>" % (
amount,
days_paid,
first_payment.strftime('%b, %d'),
expires.strftime('%b, %d %Y'),
user,
user.email)
def handle(self, *args, **options):
SUBSCRIPTION_NAMES = (
'AstroBin Donor',
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstorBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstorBin Donor Dinner Yearly',
)
"""
{
<user>: {
amount: 100,
first_payment: 1234567890.0
}
}
"""
data = dict()
for transaction in Transaction.objects.filter(
subscription__name__in = SUBSCRIPTION_NAMES,
event = "subscription payment",
timestamp__year = date.today().year).order_by('timestamp'):
if transaction.user not in data:
data[transaction.user] = {
"amount": transaction.amount,
"first_payment": transaction.timestamp
}
else:
data[transaction.user]["amount"] += transaction.amount
for user, values in data.iteritems():
amount = values["amount"]
first_payment = values["first_payment"]
self.process_user(user, amount, first_payment)
|
<commit_before><commit_msg>Add script to migrate donors to premium<commit_after>
|
from django.core.management.base import BaseCommand
from datetime import date, timedelta
from paypal.standard.ipn import models
from subscription.models import Subscription, UserSubscription, Transaction
class Command(BaseCommand):
def __init__(self):
self.premium_subscription = Subscription.objects.get(name = 'AstroBin Premium')
def process_user(self, user, amount, first_payment):
price = 36.0
days_paid = int(float(amount)/price * 365.25)
expires = (first_payment + timedelta(days_paid)).date()
if expires > date.today():
us, created = UserSubscription.objects.get_or_create(
user = user,
subscription = self.premium_subscription,
expires = expires,
cancelled = False)
us.fix()
print "%.2f \t %d \t %s \t %s \t\t %s <%s>" % (
amount,
days_paid,
first_payment.strftime('%b, %d'),
expires.strftime('%b, %d %Y'),
user,
user.email)
def handle(self, *args, **options):
SUBSCRIPTION_NAMES = (
'AstroBin Donor',
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstorBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstorBin Donor Dinner Yearly',
)
"""
{
<user>: {
amount: 100,
first_payment: 1234567890.0
}
}
"""
data = dict()
for transaction in Transaction.objects.filter(
subscription__name__in = SUBSCRIPTION_NAMES,
event = "subscription payment",
timestamp__year = date.today().year).order_by('timestamp'):
if transaction.user not in data:
data[transaction.user] = {
"amount": transaction.amount,
"first_payment": transaction.timestamp
}
else:
data[transaction.user]["amount"] += transaction.amount
for user, values in data.iteritems():
amount = values["amount"]
first_payment = values["first_payment"]
self.process_user(user, amount, first_payment)
|
Add script to migrate donors to premiumfrom django.core.management.base import BaseCommand
from datetime import date, timedelta
from paypal.standard.ipn import models
from subscription.models import Subscription, UserSubscription, Transaction
class Command(BaseCommand):
def __init__(self):
self.premium_subscription = Subscription.objects.get(name = 'AstroBin Premium')
def process_user(self, user, amount, first_payment):
price = 36.0
days_paid = int(float(amount)/price * 365.25)
expires = (first_payment + timedelta(days_paid)).date()
if expires > date.today():
us, created = UserSubscription.objects.get_or_create(
user = user,
subscription = self.premium_subscription,
expires = expires,
cancelled = False)
us.fix()
print "%.2f \t %d \t %s \t %s \t\t %s <%s>" % (
amount,
days_paid,
first_payment.strftime('%b, %d'),
expires.strftime('%b, %d %Y'),
user,
user.email)
def handle(self, *args, **options):
SUBSCRIPTION_NAMES = (
'AstroBin Donor',
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstorBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstorBin Donor Dinner Yearly',
)
"""
{
<user>: {
amount: 100,
first_payment: 1234567890.0
}
}
"""
data = dict()
for transaction in Transaction.objects.filter(
subscription__name__in = SUBSCRIPTION_NAMES,
event = "subscription payment",
timestamp__year = date.today().year).order_by('timestamp'):
if transaction.user not in data:
data[transaction.user] = {
"amount": transaction.amount,
"first_payment": transaction.timestamp
}
else:
data[transaction.user]["amount"] += transaction.amount
for user, values in data.iteritems():
amount = values["amount"]
first_payment = values["first_payment"]
self.process_user(user, amount, first_payment)
|
<commit_before><commit_msg>Add script to migrate donors to premium<commit_after>from django.core.management.base import BaseCommand
from datetime import date, timedelta
from paypal.standard.ipn import models
from subscription.models import Subscription, UserSubscription, Transaction
class Command(BaseCommand):
def __init__(self):
self.premium_subscription = Subscription.objects.get(name = 'AstroBin Premium')
def process_user(self, user, amount, first_payment):
price = 36.0
days_paid = int(float(amount)/price * 365.25)
expires = (first_payment + timedelta(days_paid)).date()
if expires > date.today():
us, created = UserSubscription.objects.get_or_create(
user = user,
subscription = self.premium_subscription,
expires = expires,
cancelled = False)
us.fix()
print "%.2f \t %d \t %s \t %s \t\t %s <%s>" % (
amount,
days_paid,
first_payment.strftime('%b, %d'),
expires.strftime('%b, %d %Y'),
user,
user.email)
def handle(self, *args, **options):
SUBSCRIPTION_NAMES = (
'AstroBin Donor',
'AstroBin Donor Coffee Monthly',
'AstroBin Donor Snack Monthly',
'AstroBin Donor Pizza Monthly',
'AstroBin Donor Movie Monthly',
'AstorBin Donor Dinner Monthly',
'AstroBin Donor Coffee Yearly',
'AstroBin Donor Snack Yearly',
'AstroBin Donor Pizza Yearly',
'AstroBin Donor Movie Yearly',
'AstorBin Donor Dinner Yearly',
)
"""
{
<user>: {
amount: 100,
first_payment: 1234567890.0
}
}
"""
data = dict()
for transaction in Transaction.objects.filter(
subscription__name__in = SUBSCRIPTION_NAMES,
event = "subscription payment",
timestamp__year = date.today().year).order_by('timestamp'):
if transaction.user not in data:
data[transaction.user] = {
"amount": transaction.amount,
"first_payment": transaction.timestamp
}
else:
data[transaction.user]["amount"] += transaction.amount
for user, values in data.iteritems():
amount = values["amount"]
first_payment = values["first_payment"]
self.process_user(user, amount, first_payment)
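
To make the proportion in process_user concrete (illustration only, not code from the commit): with the fixed yearly price of 36.0, a donor whose payments this year total 12.0 is credited int(12.0 / 36.0 * 365.25) = 121 days of Premium, counted from the first payment of the year. The amount and date below are invented.

from datetime import date, timedelta

price = 36.0
amount = 12.0                      # hypothetical total donated this year
days_paid = int(amount / price * 365.25)
first_payment = date(2015, 3, 1)   # invented first-payment date
expires = first_payment + timedelta(days_paid)
print(days_paid, expires)          # 121 2015-06-30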
|
|
3e785db34bad58aa1300f66f05f4acad8d7b34c3
|
interactive_crop.py
|
interactive_crop.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--x', type=click.INT, prompt=True)
@click.option('--y', type=click.INT, prompt=True)
@click.option('--w', type=click.INT, prompt=True)
@click.option('--h', type=click.INT, prompt=True)
def crop(path, x, y, w, h):
for f in get_file_list(path):
img = Image.open(f)
img.crop((x, y, x + w, y + h)).save(f)
if __name__ == '__main__':
crop()
|
Add interactive version of crop script
|
Add interactive version of crop script
|
Python
|
mit
|
vladimirgamalian/pictools
|
Add interactive version of crop script
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--x', type=click.INT, prompt=True)
@click.option('--y', type=click.INT, prompt=True)
@click.option('--w', type=click.INT, prompt=True)
@click.option('--h', type=click.INT, prompt=True)
def crop(path, x, y, w, h):
for f in get_file_list(path):
img = Image.open(f)
img.crop((x, y, x + w, y + h)).save(f)
if __name__ == '__main__':
crop()
|
<commit_before><commit_msg>Add interactive version of crop script<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--x', type=click.INT, prompt=True)
@click.option('--y', type=click.INT, prompt=True)
@click.option('--w', type=click.INT, prompt=True)
@click.option('--h', type=click.INT, prompt=True)
def crop(path, x, y, w, h):
for f in get_file_list(path):
img = Image.open(f)
img.crop((x, y, x + w, y + h)).save(f)
if __name__ == '__main__':
crop()
|
Add interactive version of crop script#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--x', type=click.INT, prompt=True)
@click.option('--y', type=click.INT, prompt=True)
@click.option('--w', type=click.INT, prompt=True)
@click.option('--h', type=click.INT, prompt=True)
def crop(path, x, y, w, h):
for f in get_file_list(path):
img = Image.open(f)
img.crop((x, y, x + w, y + h)).save(f)
if __name__ == '__main__':
crop()
|
<commit_before><commit_msg>Add interactive version of crop script<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image
from utils import get_file_list
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--x', type=click.INT, prompt=True)
@click.option('--y', type=click.INT, prompt=True)
@click.option('--w', type=click.INT, prompt=True)
@click.option('--h', type=click.INT, prompt=True)
def crop(path, x, y, w, h):
for f in get_file_list(path):
img = Image.open(f)
img.crop((x, y, x + w, y + h)).save(f)
if __name__ == '__main__':
crop()
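
A hypothetical smoke test of the command above using click's test runner. Everything here is invented: the import assumes the script is importable as a module named interactive_crop, and utils.get_file_list is assumed to return the image files under the given directory.

import os
from click.testing import CliRunner
from PIL import Image
from interactive_crop import crop  # assumed importable; adjust to the real module path

runner = CliRunner()
with runner.isolated_filesystem():
    os.mkdir("sprites")
    Image.new("RGB", (128, 128)).save("sprites/tile.png")
    result = runner.invoke(crop, ["sprites", "--x", "10", "--y", "20", "--w", "64", "--h", "48"])
    print(result.exit_code)  # 0 if every image was cropped in place

Run from a shell instead, leaving any of --x/--y/--w/--h off the command line makes click ask for it interactively because of prompt=True.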
|
|
aa2b62a19c8a988f2582a5e13475322e6efb4880
|
src/collectors/hbase/hbase.py
|
src/collectors/hbase/hbase.py
|
# coding=utf-8
"""
Diamond collector for HBase metrics, see:
"""
from diamond.metric import Metric
import diamond.collector
import glob
import re
import os
class HBaseCollector(diamond.collector.Collector):
re_log = re.compile(r'^(?P<timestamp>\d+) (?P<name>\S+): (?P<metrics>.*)$')
def get_default_config_help(self):
config_help = super(HBaseCollector, self).get_default_config_help()
config_help.update({
'metrics': "List of paths to process metrics from",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HBaseCollector, self).get_default_config()
config.update({
'path': 'hbase',
'method': 'Threaded',
'metrics': ['/var/log/hbase/*.metrics'],
})
return config
def collect(self):
for pattern in self.config['metrics']:
for filename in glob.glob(pattern):
self.collect_from(filename)
def collect_from(self, filename):
if not os.access(filename, os.R_OK):
self.log.error('HBaseCollector unable to read "%s"', filename)
return False
fd = open(filename, 'r+')
for line in fd:
match = self.re_log.match(line)
if not match:
continue
metrics = {}
data = match.groupdict()
for metric in data['metrics'].split(','):
metric = metric.strip()
if '=' in metric:
key, value = metric.split('=', 1)
metrics[key] = value
for metric in metrics.keys():
try:
if data['name'] == 'jvm.metrics':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['processName'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'mapred.job':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['group'].replace(' ', '_'),
metrics['counter'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'rpc.metrics':
if metric == 'port':
continue
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['port'],
metric, ]))
else:
path = self.get_metric_path('.'.join([
data['name'],
metric, ]))
value = float(metrics[metric])
self.publish_metric(Metric(path,
value,
timestamp=int(data['timestamp'])/1000))
except ValueError:
pass
fd.seek(0)
fd.truncate()
fd.close()
|
Add a HBase metrics collector.
|
Add a HBase metrics collector.
This change adds a basic hbase metrics collector that works in the same
way that the hadoop collector does.
|
Python
|
mit
|
thardie/Diamond,python-diamond/Diamond,CYBERBUGJR/Diamond,Ssawa/Diamond,acquia/Diamond,Ormod/Diamond,acquia/Diamond,szibis/Diamond,Ssawa/Diamond,hvnsweeting/Diamond,actmd/Diamond,jumping/Diamond,saucelabs/Diamond,works-mobile/Diamond,signalfx/Diamond,signalfx/Diamond,stuartbfox/Diamond,russss/Diamond,CYBERBUGJR/Diamond,jriguera/Diamond,anandbhoraskar/Diamond,skbkontur/Diamond,Slach/Diamond,CYBERBUGJR/Diamond,stuartbfox/Diamond,TAKEALOT/Diamond,sebbrandt87/Diamond,signalfx/Diamond,socialwareinc/Diamond,EzyInsights/Diamond,rtoma/Diamond,acquia/Diamond,cannium/Diamond,python-diamond/Diamond,tusharmakkar08/Diamond,TAKEALOT/Diamond,Ensighten/Diamond,jumping/Diamond,dcsquared13/Diamond,janisz/Diamond-1,h00dy/Diamond,tuenti/Diamond,bmhatfield/Diamond,codepython/Diamond,saucelabs/Diamond,MichaelDoyle/Diamond,works-mobile/Diamond,saucelabs/Diamond,jriguera/Diamond,Clever/Diamond,Netuitive/Diamond,Ensighten/Diamond,hamelg/Diamond,Basis/Diamond,Ssawa/Diamond,mfriedenhagen/Diamond,jumping/Diamond,MichaelDoyle/Diamond,rtoma/Diamond,skbkontur/Diamond,timchenxiaoyu/Diamond,mfriedenhagen/Diamond,tellapart/Diamond,EzyInsights/Diamond,mzupan/Diamond,eMerzh/Diamond-1,Netuitive/netuitive-diamond,Ormod/Diamond,tellapart/Diamond,gg7/diamond,jumping/Diamond,python-diamond/Diamond,Precis/Diamond,Precis/Diamond,tusharmakkar08/Diamond,Basis/Diamond,joel-airspring/Diamond,actmd/Diamond,joel-airspring/Diamond,mfriedenhagen/Diamond,tusharmakkar08/Diamond,Netuitive/netuitive-diamond,cannium/Diamond,Ormod/Diamond,zoidbergwill/Diamond,sebbrandt87/Diamond,thardie/Diamond,Basis/Diamond,Ssawa/Diamond,rtoma/Diamond,rtoma/Diamond,stuartbfox/Diamond,krbaker/Diamond,jaingaurav/Diamond,tuenti/Diamond,Precis/Diamond,TAKEALOT/Diamond,Netuitive/netuitive-diamond,ramjothikumar/Diamond,timchenxiaoyu/Diamond,timchenxiaoyu/Diamond,anandbhoraskar/Diamond,h00dy/Diamond,jaingaurav/Diamond,codepython/Diamond,hamelg/Diamond,zoidbergwill/Diamond,sebbrandt87/Diamond,gg7/diamond,joel-airspring/Diamond,ramjothikumar/Diamond,Netuitive/Diamond,Nihn/Diamond-1,Slach/Diamond,sebbrandt87/Diamond,ramjothikumar/Diamond,socialwareinc/Diamond,works-mobile/Diamond,Ensighten/Diamond,jriguera/Diamond,mfriedenhagen/Diamond,socialwareinc/Diamond,bmhatfield/Diamond,disqus/Diamond,EzyInsights/Diamond,eMerzh/Diamond-1,Ensighten/Diamond,mzupan/Diamond,Clever/Diamond,tellapart/Diamond,h00dy/Diamond,TinLe/Diamond,Ormod/Diamond,TAKEALOT/Diamond,Netuitive/Diamond,anandbhoraskar/Diamond,jriguera/Diamond,tuenti/Diamond,Netuitive/netuitive-diamond,disqus/Diamond,acquia/Diamond,zoidbergwill/Diamond,eMerzh/Diamond-1,anandbhoraskar/Diamond,Nihn/Diamond-1,russss/Diamond,stuartbfox/Diamond,thardie/Diamond,Nihn/Diamond-1,janisz/Diamond-1,mzupan/Diamond,jaingaurav/Diamond,bmhatfield/Diamond,CYBERBUGJR/Diamond,russss/Diamond,cannium/Diamond,tusharmakkar08/Diamond,hvnsweeting/Diamond,saucelabs/Diamond,actmd/Diamond,tuenti/Diamond,codepython/Diamond,skbkontur/Diamond,hamelg/Diamond,hamelg/Diamond,bmhatfield/Diamond,hvnsweeting/Diamond,Clever/Diamond,hvnsweeting/Diamond,joel-airspring/Diamond,MichaelDoyle/Diamond,dcsquared13/Diamond,szibis/Diamond,disqus/Diamond,timchenxiaoyu/Diamond,signalfx/Diamond,jaingaurav/Diamond,works-mobile/Diamond,gg7/diamond,tellapart/Diamond,skbkontur/Diamond,krbaker/Diamond,TinLe/Diamond,janisz/Diamond-1,mzupan/Diamond,Clever/Diamond,actmd/Diamond,h00dy/Diamond,TinLe/Diamond,krbaker/Diamond,zoidbergwill/Diamond,eMerzh/Diamond-1,EzyInsights/Diamond,russss/Diamond,codepython/Diamond,Slach/Diamond,thardie/Diamond,gg7/diamond,szibis/Diamond,cannium/Diamond,
dcsquared13/Diamond,MichaelDoyle/Diamond,szibis/Diamond,Precis/Diamond,Nihn/Diamond-1,ramjothikumar/Diamond,janisz/Diamond-1,dcsquared13/Diamond,krbaker/Diamond,Slach/Diamond,Basis/Diamond,TinLe/Diamond,Netuitive/Diamond,socialwareinc/Diamond
|
Add a HBase metrics collector.
This change adds a basic hbase metrics collector that works in the same
way that the hadoop collector does.
|
# coding=utf-8
"""
Diamond collector for HBase metrics, see:
"""
from diamond.metric import Metric
import diamond.collector
import glob
import re
import os
class HBaseCollector(diamond.collector.Collector):
re_log = re.compile(r'^(?P<timestamp>\d+) (?P<name>\S+): (?P<metrics>.*)$')
def get_default_config_help(self):
config_help = super(HBaseCollector, self).get_default_config_help()
config_help.update({
'metrics': "List of paths to process metrics from",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HBaseCollector, self).get_default_config()
config.update({
'path': 'hbase',
'method': 'Threaded',
'metrics': ['/var/log/hbase/*.metrics'],
})
return config
def collect(self):
for pattern in self.config['metrics']:
for filename in glob.glob(pattern):
self.collect_from(filename)
def collect_from(self, filename):
if not os.access(filename, os.R_OK):
self.log.error('HBaseCollector unable to read "%s"', filename)
return False
fd = open(filename, 'r+')
for line in fd:
match = self.re_log.match(line)
if not match:
continue
metrics = {}
data = match.groupdict()
for metric in data['metrics'].split(','):
metric = metric.strip()
if '=' in metric:
key, value = metric.split('=', 1)
metrics[key] = value
for metric in metrics.keys():
try:
if data['name'] == 'jvm.metrics':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['processName'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'mapred.job':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['group'].replace(' ', '_'),
metrics['counter'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'rpc.metrics':
if metric == 'port':
continue
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['port'],
metric, ]))
else:
path = self.get_metric_path('.'.join([
data['name'],
metric, ]))
value = float(metrics[metric])
self.publish_metric(Metric(path,
value,
timestamp=int(data['timestamp'])/1000))
except ValueError:
pass
fd.seek(0)
fd.truncate()
fd.close()
|
<commit_before><commit_msg>Add a HBase metrics collector.
This change adds a basic hbase metrics collector that works in the same
way that the hadoop collector does.<commit_after>
|
# coding=utf-8
"""
Diamond collector for HBase metrics, see:
"""
from diamond.metric import Metric
import diamond.collector
import glob
import re
import os
class HBaseCollector(diamond.collector.Collector):
re_log = re.compile(r'^(?P<timestamp>\d+) (?P<name>\S+): (?P<metrics>.*)$')
def get_default_config_help(self):
config_help = super(HBaseCollector, self).get_default_config_help()
config_help.update({
'metrics': "List of paths to process metrics from",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HBaseCollector, self).get_default_config()
config.update({
'path': 'hbase',
'method': 'Threaded',
'metrics': ['/var/log/hbase/*.metrics'],
})
return config
def collect(self):
for pattern in self.config['metrics']:
for filename in glob.glob(pattern):
self.collect_from(filename)
def collect_from(self, filename):
if not os.access(filename, os.R_OK):
self.log.error('HBaseCollector unable to read "%s"', filename)
return False
fd = open(filename, 'r+')
for line in fd:
match = self.re_log.match(line)
if not match:
continue
metrics = {}
data = match.groupdict()
for metric in data['metrics'].split(','):
metric = metric.strip()
if '=' in metric:
key, value = metric.split('=', 1)
metrics[key] = value
for metric in metrics.keys():
try:
if data['name'] == 'jvm.metrics':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['processName'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'mapred.job':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['group'].replace(' ', '_'),
metrics['counter'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'rpc.metrics':
if metric == 'port':
continue
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['port'],
metric, ]))
else:
path = self.get_metric_path('.'.join([
data['name'],
metric, ]))
value = float(metrics[metric])
self.publish_metric(Metric(path,
value,
timestamp=int(data['timestamp'])/1000))
except ValueError:
pass
fd.seek(0)
fd.truncate()
fd.close()
|
Add a HBase metrics collector.
This change adds a basic hbase metrics collector that works in the same
way that the hadoop collector does.# coding=utf-8
"""
Diamond collector for HBase metrics, see:
"""
from diamond.metric import Metric
import diamond.collector
import glob
import re
import os
class HBaseCollector(diamond.collector.Collector):
re_log = re.compile(r'^(?P<timestamp>\d+) (?P<name>\S+): (?P<metrics>.*)$')
def get_default_config_help(self):
config_help = super(HBaseCollector, self).get_default_config_help()
config_help.update({
'metrics': "List of paths to process metrics from",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HBaseCollector, self).get_default_config()
config.update({
'path': 'hbase',
'method': 'Threaded',
'metrics': ['/var/log/hbase/*.metrics'],
})
return config
def collect(self):
for pattern in self.config['metrics']:
for filename in glob.glob(pattern):
self.collect_from(filename)
def collect_from(self, filename):
if not os.access(filename, os.R_OK):
self.log.error('HBaseCollector unable to read "%s"', filename)
return False
fd = open(filename, 'r+')
for line in fd:
match = self.re_log.match(line)
if not match:
continue
metrics = {}
data = match.groupdict()
for metric in data['metrics'].split(','):
metric = metric.strip()
if '=' in metric:
key, value = metric.split('=', 1)
metrics[key] = value
for metric in metrics.keys():
try:
if data['name'] == 'jvm.metrics':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['processName'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'mapred.job':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['group'].replace(' ', '_'),
metrics['counter'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'rpc.metrics':
if metric == 'port':
continue
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['port'],
metric, ]))
else:
path = self.get_metric_path('.'.join([
data['name'],
metric, ]))
value = float(metrics[metric])
self.publish_metric(Metric(path,
value,
timestamp=int(data['timestamp'])/1000))
except ValueError:
pass
fd.seek(0)
fd.truncate()
fd.close()
|
<commit_before><commit_msg>Add a HBase metrics collector.
This change adds a basic hbase metrics collector that works in the same
way that the hadoop collector does.<commit_after># coding=utf-8
"""
Diamond collector for HBase metrics, see:
"""
from diamond.metric import Metric
import diamond.collector
import glob
import re
import os
class HBaseCollector(diamond.collector.Collector):
re_log = re.compile(r'^(?P<timestamp>\d+) (?P<name>\S+): (?P<metrics>.*)$')
def get_default_config_help(self):
config_help = super(HBaseCollector, self).get_default_config_help()
config_help.update({
'metrics': "List of paths to process metrics from",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HBaseCollector, self).get_default_config()
config.update({
'path': 'hbase',
'method': 'Threaded',
'metrics': ['/var/log/hbase/*.metrics'],
})
return config
def collect(self):
for pattern in self.config['metrics']:
for filename in glob.glob(pattern):
self.collect_from(filename)
def collect_from(self, filename):
if not os.access(filename, os.R_OK):
self.log.error('HBaseCollector unable to read "%s"', filename)
return False
fd = open(filename, 'r+')
for line in fd:
match = self.re_log.match(line)
if not match:
continue
metrics = {}
data = match.groupdict()
for metric in data['metrics'].split(','):
metric = metric.strip()
if '=' in metric:
key, value = metric.split('=', 1)
metrics[key] = value
for metric in metrics.keys():
try:
if data['name'] == 'jvm.metrics':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['processName'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'mapred.job':
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['group'].replace(' ', '_'),
metrics['counter'].replace(' ', '_'),
metric, ]))
elif data['name'] == 'rpc.metrics':
if metric == 'port':
continue
path = self.get_metric_path('.'.join([
data['name'],
metrics['hostName'].replace('.', '_'),
metrics['port'],
metric, ]))
else:
path = self.get_metric_path('.'.join([
data['name'],
metric, ]))
value = float(metrics[metric])
self.publish_metric(Metric(path,
value,
timestamp=int(data['timestamp'])/1000))
except ValueError:
pass
fd.seek(0)
fd.truncate()
fd.close()
|
|
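To make the parsing logic in the HBase collector above concrete, here is a small standalone check of how its regex and key=value splitting behave; the sample line is invented for illustration and is not taken from a real HBase metrics log:
import re
re_log = re.compile(r'^(?P<timestamp>\d+) (?P<name>\S+): (?P<metrics>.*)$')
sample = '1393527060000 jvm.metrics: hostName=node1.example.com, processName=RegionServer, memHeapUsedM=412.5'
data = re_log.match(sample).groupdict()
metrics = dict(kv.strip().split('=', 1) for kv in data['metrics'].split(','))
assert data['name'] == 'jvm.metrics'
assert metrics['processName'] == 'RegionServer'
assert float(metrics['memHeapUsedM']) == 412.5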
9f81fa58475a352aa009a5009dd31428f574fdd9
|
data/retrieve/retrieve.py
|
data/retrieve/retrieve.py
|
import os
import urllib.request
'''
Checks to see whether the data is present.
Used to decide whether the data needs to be downloaded or not.
Insert the file name in the following format:
os.path.isfile('NAME_OF_FILE')
'''
if not os.path.isfile('../DNA_TEST_1.txt'):
    # NOTE: 'url' is not defined in this skeleton; it must point at the data source before this call.
    urllib.request.urlretrieve(url, 'DNA_TEST_1.txt')
|
Create basic structure for test data benchmark module
|
Create basic structure for test data benchmark module
|
Python
|
mit
|
bhaprayan/sagkocana
|
Create basic structure for test data benchmark module
|
import os
import urllib.request
'''
Checks to see whether the data is present.
Used to decide whether the data needs to be downloaded or not.
Insert the file name in the following format:
os.path.isfile('NAME_OF_FILE')
'''
if not os.path.isfile('../DNA_TEST_1.txt'):
    # NOTE: 'url' is not defined in this skeleton; it must point at the data source before this call.
    urllib.request.urlretrieve(url, 'DNA_TEST_1.txt')
|
<commit_before><commit_msg>Create basic structure for test data benchmark module<commit_after>
|
import os
import urllib.request
'''
Checks to see whether the data is present.
Used to decide whether the data needs to be downloaded or not.
Insert the file name in the following format:
os.path.isfile('NAME_OF_FILE')
'''
if not os.path.isfile('../DNA_TEST_1.txt'):
    # NOTE: 'url' is not defined in this skeleton; it must point at the data source before this call.
    urllib.request.urlretrieve(url, 'DNA_TEST_1.txt')
|
Create basic structure for test data benchmark moduleimport os
import urllib.request
'''
Checks to see whether the data is present.
Used to decide whether the data needs to be downloaded or not.
Insert the file name in the following format:
os.path.isfile('NAME_OF_FILE')
'''
if not os.path.isfile('../DNA_TEST_1.txt'):
    # NOTE: 'url' is not defined in this skeleton; it must point at the data source before this call.
    urllib.request.urlretrieve(url, 'DNA_TEST_1.txt')
|
<commit_before><commit_msg>Create basic structure for test data benchmark module<commit_after>import os
import urllib.request
'''
Checks to see whether the data is present.
Used to decide whether the data needs to be downloaded or not.
Insert the file name in the following format:
os.path.isfile('NAME_OF_FILE')
'''
if not os.path.isfile('../DNA_TEST_1.txt'):
    # NOTE: 'url' is not defined in this skeleton; it must point at the data source before this call.
    urllib.request.urlretrieve(url, 'DNA_TEST_1.txt')
|
|
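For context, a runnable version of the skeleton above needs the source URL defined before urlretrieve is called; the URL below is a hypothetical placeholder rather than the project's actual data location:
import os
import urllib.request
url = 'https://example.org/data/DNA_TEST_1.txt'  # hypothetical placeholder, not the real source
if not os.path.isfile('DNA_TEST_1.txt'):
    urllib.request.urlretrieve(url, 'DNA_TEST_1.txt')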
de52670eebc4d78aee94cdbabdcb31fcaad31fc3
|
linkprojects.py
|
linkprojects.py
|
import os
rootdir = '/Users/work/development/repositories/pet-projects'
languageDirectories = next(os.walk(rootdir))[1]
if not os.path.islink("all"):
os.symlink(rootdir, "all")
else:
print "Link to ", "all", "exists already"
for directory in languageDirectories:
if(directory.startswith(".")):
continue;
projectDirectories = next(os.walk(rootdir + "/" + directory))[1]
for projectDir in projectDirectories:
fullProjectDirectory = rootdir + "/" + directory + "/" + projectDir
print "Project '", projectDir, "' is in ", fullProjectDirectory
linkName = projectDir
if not os.path.islink(linkName):
target = fullProjectDirectory
os.symlink(target, linkName)
else:
print "Link to ", linkName, "exists already"
|
Add script to create a "quick projects" directory, for fast access without having to dig into repo structure
|
Add script to create a "quick projects" directory, for fast access without having to dig into repo structure
|
Python
|
mit
|
heliosbryan/pet-projects,heliosbryan/pet-projects
|
Add script to create a "quick projects" directory, for fast access without having to dig into repo structure
|
import os
rootdir = '/Users/work/development/repositories/pet-projects'
languageDirectories = next(os.walk(rootdir))[1]
if not os.path.islink("all"):
os.symlink(rootdir, "all")
else:
print "Link to ", "all", "exists already"
for directory in languageDirectories:
if(directory.startswith(".")):
continue;
projectDirectories = next(os.walk(rootdir + "/" + directory))[1]
for projectDir in projectDirectories:
fullProjectDirectory = rootdir + "/" + directory + "/" + projectDir
print "Project '", projectDir, "' is in ", fullProjectDirectory
linkName = projectDir
if not os.path.islink(linkName):
target = fullProjectDirectory
os.symlink(target, linkName)
else:
print "Link to ", linkName, "exists already"
|
<commit_before><commit_msg>Add script to create a "quick projects" directory, for fast access without having to dig into repo structure<commit_after>
|
import os
rootdir = '/Users/work/development/repositories/pet-projects'
languageDirectories = next(os.walk(rootdir))[1]
if not os.path.islink("all"):
os.symlink(rootdir, "all")
else:
print "Link to ", "all", "exists already"
for directory in languageDirectories:
if(directory.startswith(".")):
continue;
projectDirectories = next(os.walk(rootdir + "/" + directory))[1]
for projectDir in projectDirectories:
fullProjectDirectory = rootdir + "/" + directory + "/" + projectDir
print "Project '", projectDir, "' is in ", fullProjectDirectory
linkName = projectDir
if not os.path.islink(linkName):
target = fullProjectDirectory
os.symlink(target, linkName)
else:
print "Link to ", linkName, "exists already"
|
Add script to create a "quick projects" directory, for fast access without having to dig into repo structureimport os
rootdir = '/Users/work/development/repositories/pet-projects'
languageDirectories = next(os.walk(rootdir))[1]
if not os.path.islink("all"):
os.symlink(rootdir, "all")
else:
print "Link to ", "all", "exists already"
for directory in languageDirectories:
if(directory.startswith(".")):
continue;
projectDirectories = next(os.walk(rootdir + "/" + directory))[1]
for projectDir in projectDirectories:
fullProjectDirectory = rootdir + "/" + directory + "/" + projectDir
print "Project '", projectDir, "' is in ", fullProjectDirectory
linkName = projectDir
if not os.path.islink(linkName):
target = fullProjectDirectory
os.symlink(target, linkName)
else:
print "Link to ", linkName, "exists already"
|
<commit_before><commit_msg>Add script to create a "quick projects" directory, for fast access without having to dig into repo structure<commit_after>import os
rootdir = '/Users/work/development/repositories/pet-projects'
languageDirectories = next(os.walk(rootdir))[1]
if not os.path.islink("all"):
os.symlink(rootdir, "all")
else:
print "Link to ", "all", "exists already"
for directory in languageDirectories:
if(directory.startswith(".")):
continue;
projectDirectories = next(os.walk(rootdir + "/" + directory))[1]
for projectDir in projectDirectories:
fullProjectDirectory = rootdir + "/" + directory + "/" + projectDir
print "Project '", projectDir, "' is in ", fullProjectDirectory
linkName = projectDir
if not os.path.islink(linkName):
target = fullProjectDirectory
os.symlink(target, linkName)
else:
print "Link to ", linkName, "exists already"
|
|
adc1de57f41c6868f997a05a7410d72d523d6036
|
cgi-bin/request/gis/wou_outline.py
|
cgi-bin/request/gis/wou_outline.py
|
#!/mesonet/python/bin/python
# Generate a shapefile of the WOU outline.
# 28 Aug 2004 port to iem40
import shapelib, dbflib, mx.DateTime, zipfile, os, sys, shutil, cgi
from pyIEM import wellknowntext, iemdb
i = iemdb.iemdb()
mydb = i["postgis"]
mydb.query("SET TIME ZONE 'GMT'")
# Get CGI vars
form = cgi.FormContent()
year = int(form["year"][0])
etn = int(form["etn"][0])
fp = "watch_%s_%s" % (year, etn)
os.chdir("/tmp/")
shp = shapelib.create(fp, shapelib.SHPT_POLYGON)
dbf = dbflib.create(fp)
dbf.add_field("SIG", dbflib.FTString, 1, 0)
dbf.add_field("ETN", dbflib.FTInteger, 4, 0)
sql = """select astext(multi(geomunion(geom))) as tgeom
from warnings_%s WHERE significance = 'A' and
phenomena IN ('TO','SV') and eventid = %s and
issue < ((select issued from watches WHERE num = %s
and extract(year from issued) = %s) + '30 minutes'::interval)
""" % (year, etn, etn, year)
rs = mydb.query(sql).dictresult()
if len(rs) == 0:
sys.exit()
s = rs[0]["tgeom"]
f = wellknowntext.convert_well_known_text(s)
d = {}
d["SIG"] = 'A'
d["ETN"] = etn
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(0, d)
del(obj)
del(shp)
del(dbf)
# Create zip file, send it back to the clients
shutil.copyfile("/mesonet/data/gis/meta/4326.prj", fp+".prj")
z = zipfile.ZipFile(fp+".zip", 'w', zipfile.ZIP_DEFLATED)
z.write(fp+".shp")
z.write(fp+".shx")
z.write(fp+".dbf")
z.write(fp+".prj")
z.close()
print "Content-type: application/octet-stream"
print "Content-Disposition: attachment; filename=%s.zip" % (fp,)
print
print file(fp+".zip", 'r').read(),
os.remove(fp+".zip")
os.remove(fp+".shp")
os.remove(fp+".shx")
os.remove(fp+".dbf")
os.remove(fp+".prj")
|
Add tool as per request
|
Add tool as per request
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add tool as per request
|
#!/mesonet/python/bin/python
# Generate a shapefile of the WOU outline.
# 28 Aug 2004 port to iem40
import shapelib, dbflib, mx.DateTime, zipfile, os, sys, shutil, cgi
from pyIEM import wellknowntext, iemdb
i = iemdb.iemdb()
mydb = i["postgis"]
mydb.query("SET TIME ZONE 'GMT'")
# Get CGI vars
form = cgi.FormContent()
year = int(form["year"][0])
etn = int(form["etn"][0])
fp = "watch_%s_%s" % (year, etn)
os.chdir("/tmp/")
shp = shapelib.create(fp, shapelib.SHPT_POLYGON)
dbf = dbflib.create(fp)
dbf.add_field("SIG", dbflib.FTString, 1, 0)
dbf.add_field("ETN", dbflib.FTInteger, 4, 0)
sql = """select astext(multi(geomunion(geom))) as tgeom
from warnings_%s WHERE significance = 'A' and
phenomena IN ('TO','SV') and eventid = %s and
issue < ((select issued from watches WHERE num = %s
and extract(year from issued) = %s) + '30 minutes'::interval)
""" % (year, etn, etn, year)
rs = mydb.query(sql).dictresult()
if len(rs) == 0:
sys.exit()
s = rs[0]["tgeom"]
f = wellknowntext.convert_well_known_text(s)
d = {}
d["SIG"] = 'A'
d["ETN"] = etn
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(0, d)
del(obj)
del(shp)
del(dbf)
# Create zip file, send it back to the clients
shutil.copyfile("/mesonet/data/gis/meta/4326.prj", fp+".prj")
z = zipfile.ZipFile(fp+".zip", 'w', zipfile.ZIP_DEFLATED)
z.write(fp+".shp")
z.write(fp+".shx")
z.write(fp+".dbf")
z.write(fp+".prj")
z.close()
print "Content-type: application/octet-stream"
print "Content-Disposition: attachment; filename=%s.zip" % (fp,)
print
print file(fp+".zip", 'r').read(),
os.remove(fp+".zip")
os.remove(fp+".shp")
os.remove(fp+".shx")
os.remove(fp+".dbf")
os.remove(fp+".prj")
|
<commit_before><commit_msg>Add tool as per request<commit_after>
|
#!/mesonet/python/bin/python
# Generate a shapefile of the WOU outline.
# 28 Aug 2004 port to iem40
import shapelib, dbflib, mx.DateTime, zipfile, os, sys, shutil, cgi
from pyIEM import wellknowntext, iemdb
i = iemdb.iemdb()
mydb = i["postgis"]
mydb.query("SET TIME ZONE 'GMT'")
# Get CGI vars
form = cgi.FormContent()
year = int(form["year"][0])
etn = int(form["etn"][0])
fp = "watch_%s_%s" % (year, etn)
os.chdir("/tmp/")
shp = shapelib.create(fp, shapelib.SHPT_POLYGON)
dbf = dbflib.create(fp)
dbf.add_field("SIG", dbflib.FTString, 1, 0)
dbf.add_field("ETN", dbflib.FTInteger, 4, 0)
sql = """select astext(multi(geomunion(geom))) as tgeom
from warnings_%s WHERE significance = 'A' and
phenomena IN ('TO','SV') and eventid = %s and
issue < ((select issued from watches WHERE num = %s
and extract(year from issued) = %s) + '30 minutes'::interval)
""" % (year, etn, etn, year)
rs = mydb.query(sql).dictresult()
if len(rs) == 0:
sys.exit()
s = rs[0]["tgeom"]
f = wellknowntext.convert_well_known_text(s)
d = {}
d["SIG"] = 'A'
d["ETN"] = etn
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(0, d)
del(obj)
del(shp)
del(dbf)
# Create zip file, send it back to the clients
shutil.copyfile("/mesonet/data/gis/meta/4326.prj", fp+".prj")
z = zipfile.ZipFile(fp+".zip", 'w', zipfile.ZIP_DEFLATED)
z.write(fp+".shp")
z.write(fp+".shx")
z.write(fp+".dbf")
z.write(fp+".prj")
z.close()
print "Content-type: application/octet-stream"
print "Content-Disposition: attachment; filename=%s.zip" % (fp,)
print
print file(fp+".zip", 'r').read(),
os.remove(fp+".zip")
os.remove(fp+".shp")
os.remove(fp+".shx")
os.remove(fp+".dbf")
os.remove(fp+".prj")
|
Add tool as per request#!/mesonet/python/bin/python
# Generate a shapefile of the WOU outline.
# 28 Aug 2004 port to iem40
import shapelib, dbflib, mx.DateTime, zipfile, os, sys, shutil, cgi
from pyIEM import wellknowntext, iemdb
i = iemdb.iemdb()
mydb = i["postgis"]
mydb.query("SET TIME ZONE 'GMT'")
# Get CGI vars
form = cgi.FormContent()
year = int(form["year"][0])
etn = int(form["etn"][0])
fp = "watch_%s_%s" % (year, etn)
os.chdir("/tmp/")
shp = shapelib.create(fp, shapelib.SHPT_POLYGON)
dbf = dbflib.create(fp)
dbf.add_field("SIG", dbflib.FTString, 1, 0)
dbf.add_field("ETN", dbflib.FTInteger, 4, 0)
sql = """select astext(multi(geomunion(geom))) as tgeom
from warnings_%s WHERE significance = 'A' and
phenomena IN ('TO','SV') and eventid = %s and
issue < ((select issued from watches WHERE num = %s
and extract(year from issued) = %s) + '30 minutes'::interval)
""" % (year, etn, etn, year)
rs = mydb.query(sql).dictresult()
if len(rs) == 0:
sys.exit()
s = rs[0]["tgeom"]
f = wellknowntext.convert_well_known_text(s)
d = {}
d["SIG"] = 'A'
d["ETN"] = etn
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(0, d)
del(obj)
del(shp)
del(dbf)
# Create zip file, send it back to the clients
shutil.copyfile("/mesonet/data/gis/meta/4326.prj", fp+".prj")
z = zipfile.ZipFile(fp+".zip", 'w', zipfile.ZIP_DEFLATED)
z.write(fp+".shp")
z.write(fp+".shx")
z.write(fp+".dbf")
z.write(fp+".prj")
z.close()
print "Content-type: application/octet-stream"
print "Content-Disposition: attachment; filename=%s.zip" % (fp,)
print
print file(fp+".zip", 'r').read(),
os.remove(fp+".zip")
os.remove(fp+".shp")
os.remove(fp+".shx")
os.remove(fp+".dbf")
os.remove(fp+".prj")
|
<commit_before><commit_msg>Add tool as per request<commit_after>#!/mesonet/python/bin/python
# Generate a shapefile of the WOU outline.
# 28 Aug 2004 port to iem40
import shapelib, dbflib, mx.DateTime, zipfile, os, sys, shutil, cgi
from pyIEM import wellknowntext, iemdb
i = iemdb.iemdb()
mydb = i["postgis"]
mydb.query("SET TIME ZONE 'GMT'")
# Get CGI vars
form = cgi.FormContent()
year = int(form["year"][0])
etn = int(form["etn"][0])
fp = "watch_%s_%s" % (year, etn)
os.chdir("/tmp/")
shp = shapelib.create(fp, shapelib.SHPT_POLYGON)
dbf = dbflib.create(fp)
dbf.add_field("SIG", dbflib.FTString, 1, 0)
dbf.add_field("ETN", dbflib.FTInteger, 4, 0)
sql = """select astext(multi(geomunion(geom))) as tgeom
from warnings_%s WHERE significance = 'A' and
phenomena IN ('TO','SV') and eventid = %s and
issue < ((select issued from watches WHERE num = %s
and extract(year from issued) = %s) + '30 minutes'::interval)
""" % (year, etn, etn, year)
rs = mydb.query(sql).dictresult()
if len(rs) == 0:
sys.exit()
s = rs[0]["tgeom"]
f = wellknowntext.convert_well_known_text(s)
d = {}
d["SIG"] = 'A'
d["ETN"] = etn
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(0, d)
del(obj)
del(shp)
del(dbf)
# Create zip file, send it back to the clients
shutil.copyfile("/mesonet/data/gis/meta/4326.prj", fp+".prj")
z = zipfile.ZipFile(fp+".zip", 'w', zipfile.ZIP_DEFLATED)
z.write(fp+".shp")
z.write(fp+".shx")
z.write(fp+".dbf")
z.write(fp+".prj")
z.close()
print "Content-type: application/octet-stream"
print "Content-Disposition: attachment; filename=%s.zip" % (fp,)
print
print file(fp+".zip", 'r').read(),
os.remove(fp+".zip")
os.remove(fp+".shp")
os.remove(fp+".shx")
os.remove(fp+".dbf")
os.remove(fp+".prj")
|
|
5ded544070c0f6c2a07a11e48e94ded92c79d141
|
pyudl/tds/test/test_catalog.py
|
pyudl/tds/test/test_catalog.py
|
from pyudl.tds import TDSCatalog
class TestCatalog(object):
baseURL = 'http://thredds-test.unidata.ucar.edu/thredds/'
def test_basic(self):
        url = self.baseURL + 'catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalogRefs
def test_access(self):
        url = self.baseURL + 'catalog/grib/NCEP/GFS/Global_0p5deg/latest.xml'
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.accessUrls
|
Add some simple unit tests.
|
Add some simple unit tests.
|
Python
|
bsd-3-clause
|
MoonRaker/siphon,Unidata/siphon,dopplershift/siphon,dopplershift/siphon,hyoklee/siphon
|
Add some simple unit tests.
|
from pyudl.tds import TDSCatalog
class TestCatalog(object):
baseURL = 'http://thredds-test.unidata.ucar.edu/thredds/'
def test_basic(self):
        url = self.baseURL + 'catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalogRefs
def test_access(self):
        url = self.baseURL + 'catalog/grib/NCEP/GFS/Global_0p5deg/latest.xml'
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.accessUrls
|
<commit_before><commit_msg>Add some simple unit tests.<commit_after>
|
from pyudl.tds import TDSCatalog
class TestCatalog(object):
baseURL = 'http://thredds-test.unidata.ucar.edu/thredds/'
def test_basic(self):
        url = self.baseURL + 'catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalogRefs
def test_access(self):
        url = self.baseURL + 'catalog/grib/NCEP/GFS/Global_0p5deg/latest.xml'
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.accessUrls
|
Add some simple unit tests.from pyudl.tds import TDSCatalog
class TestCatalog(object):
baseURL = 'http://thredds-test.unidata.ucar.edu/thredds/'
def test_basic(self):
        url = self.baseURL + 'catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalogRefs
def test_access(self):
        url = self.baseURL + 'catalog/grib/NCEP/GFS/Global_0p5deg/latest.xml'
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.accessUrls
|
<commit_before><commit_msg>Add some simple unit tests.<commit_after>from pyudl.tds import TDSCatalog
class TestCatalog(object):
baseURL = 'http://thredds-test.unidata.ucar.edu/thredds/'
def test_basic(self):
        url = self.baseURL + 'catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalogRefs
def test_access(self):
        url = self.baseURL + 'catalog/grib/NCEP/GFS/Global_0p5deg/latest.xml'
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.accessUrls
|
|
82b5fbc30c31fa6c3ba5ec29a4088d539987e12b
|
dds_playground/generate_midimapping.py
|
dds_playground/generate_midimapping.py
|
def midi_to_freq(note):
return (2**((note-69)/12)) * 440
print("const uint16_t notemappings[] PROGMEM = {")
print(", ".join([str(round(midi_to_freq(note))) for note in range(0,128)]))
print("};")
|
Add python script to generate MIDI note -> frequency tables
|
Add python script to generate MIDI note -> frequency tables
|
Python
|
mit
|
ArchimedesPi/synths,ArchimedesPi/synths,ArchimedesPi/synths
|
Add python script to generate MIDI note -> frequency tables
|
def midi_to_freq(note):
return (2**((note-69)/12)) * 440
print("const uint16_t notemappings[] PROGMEM = {")
print(", ".join([str(round(midi_to_freq(note))) for note in range(0,128)]))
print("};")
|
<commit_before><commit_msg>Add python script to generate MIDI note -> frequency tables<commit_after>
|
def midi_to_freq(note):
return (2**((note-69)/12)) * 440
print("const uint16_t notemappings[] PROGMEM = {")
print(", ".join([str(round(midi_to_freq(note))) for note in range(0,128)]))
print("};")
|
Add python script to generate MIDI note -> frequency tablesdef midi_to_freq(note):
return (2**((note-69)/12)) * 440
print("const uint16_t notemappings[] PROGMEM = {")
print(", ".join([str(round(midi_to_freq(note))) for note in range(0,128)]))
print("};")
|
<commit_before><commit_msg>Add python script to generate MIDI note -> frequency tables<commit_after>def midi_to_freq(note):
return (2**((note-69)/12)) * 440
print("const uint16_t notemappings[] PROGMEM = {")
print(", ".join([str(round(midi_to_freq(note))) for note in range(0,128)]))
print("};")
|
|
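As a quick sanity check of the equal-temperament formula in the script above, the values below follow from the standard MIDI convention that note 69 is A4 at 440 Hz; they are arithmetic facts rather than data taken from the repository:
def midi_to_freq(note):
    return (2 ** ((note - 69) / 12)) * 440
assert round(midi_to_freq(69)) == 440        # A4, the reference pitch
assert round(midi_to_freq(81)) == 880        # one octave up doubles the frequency
assert round(midi_to_freq(60), 2) == 261.63  # middle C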
a6fba1b774d5ddeaa8eeab1b9a394a2d5ec0fdcf
|
tests/terminal_tests/__init__.py
|
tests/terminal_tests/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
Add module for Terminal tests
|
Add module for Terminal tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add module for Terminal tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Add module for Terminal tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
Add module for Terminal tests#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Add module for Terminal tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
|
7b6ad0ce2e6c3c623966c5568989ed5042fb9b1d
|
ports/nrf/modules/scripts/_mkfs.py
|
ports/nrf/modules/scripts/_mkfs.py
|
import uos, nrf
try:
from uos import VfsLfs1
uos.VfsLfs1.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsLfs2
uos.VfsLfs2.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsFat
uos.VfsFat.mkfs(nrf.Flash())
except ImportError:
pass
except OSError as e:
if e.args[0] == 5: # I/O Error
flashbdev_size = (nrf.Flash.ioctl(4, 0) * nrf.Flash.ioctl(5, 0)) // 1024
print()
print("Is `FS_SIZE=%iK` enough for FAT filesystem?" % flashbdev_size)
|
Add file system formatting script.
|
nrf/modules/scripts: Add file system formatting script.
Add a helper script _mkfs.py which automatically formats the file system if
nrf.Flash() is located and a VFS file system has been included in the
compilation.
The precedence is: first LFS1, LFS2 then FAT.
|
Python
|
mit
|
bvernoux/micropython,adafruit/circuitpython,adafruit/circuitpython,bvernoux/micropython,bvernoux/micropython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,bvernoux/micropython,bvernoux/micropython
|
nrf/modules/scripts: Add file system formatting script.
Add a helper script _mkfs.py which automatically formats the file system if
nrf.Flash() is located and a VFS file system has been included in the
compilation.
The precedence is: first LFS1, LFS2 then FAT.
|
import uos, nrf
try:
from uos import VfsLfs1
uos.VfsLfs1.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsLfs2
uos.VfsLfs2.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsFat
uos.VfsFat.mkfs(nrf.Flash())
except ImportError:
pass
except OSError as e:
if e.args[0] == 5: # I/O Error
flashbdev_size = (nrf.Flash.ioctl(4, 0) * nrf.Flash.ioctl(5, 0)) // 1024
print()
print("Is `FS_SIZE=%iK` enough for FAT filesystem?" % flashbdev_size)
|
<commit_before><commit_msg>nrf/modules/scripts: Add file system formatting script.
Add a helper script _mkfs.py which automatically formats the file system if
nrf.Flash() is located and a VFS file system has been included in the
compilation.
The precedence is: first LFS1, LFS2 then FAT.<commit_after>
|
import uos, nrf
try:
from uos import VfsLfs1
uos.VfsLfs1.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsLfs2
uos.VfsLfs2.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsFat
uos.VfsFat.mkfs(nrf.Flash())
except ImportError:
pass
except OSError as e:
if e.args[0] == 5: # I/O Error
flashbdev_size = (nrf.Flash.ioctl(4, 0) * nrf.Flash.ioctl(5, 0)) // 1024
print()
print("Is `FS_SIZE=%iK` enough for FAT filesystem?" % flashbdev_size)
|
nrf/modules/scripts: Add file system formatting script.
Add a helper script _mkfs.py which automatically formats the file system if
nrf.Flash() is located and a VFS file system has been included in the
compilation.
The precedence is: first LFS1, LFS2 then FAT.import uos, nrf
try:
from uos import VfsLfs1
uos.VfsLfs1.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsLfs2
uos.VfsLfs2.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsFat
uos.VfsFat.mkfs(nrf.Flash())
except ImportError:
pass
except OSError as e:
if e.args[0] == 5: # I/O Error
flashbdev_size = (nrf.Flash.ioctl(4, 0) * nrf.Flash.ioctl(5, 0)) // 1024
print()
print("Is `FS_SIZE=%iK` enough for FAT filesystem?" % flashbdev_size)
|
<commit_before><commit_msg>nrf/modules/scripts: Add file system formatting script.
Add a helper script _mkfs.py which automatically formats the file system if
nrf.Flash() is located and a VFS file system has been included in the
compilation.
The precedence is: first LFS1, LFS2 then FAT.<commit_after>import uos, nrf
try:
from uos import VfsLfs1
uos.VfsLfs1.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsLfs2
uos.VfsLfs2.mkfs(nrf.Flash())
except ImportError:
try:
from uos import VfsFat
uos.VfsFat.mkfs(nrf.Flash())
except ImportError:
pass
except OSError as e:
if e.args[0] == 5: # I/O Error
flashbdev_size = (nrf.Flash.ioctl(4, 0) * nrf.Flash.ioctl(5, 0)) // 1024
print()
print("Is `FS_SIZE=%iK` enough for FAT filesystem?" % flashbdev_size)
|
|
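A minimal sketch of how the filesystem formatted by the script above might then be mounted on the nRF port; it assumes the littlefs v2 variant was the one compiled in, which is an assumption and not part of the record:
import uos, nrf
bdev = nrf.Flash()
vfs = uos.VfsLfs2(bdev)   # assumes the build enabled LFS2 support
uos.mount(vfs, '/flash')  # files are then reachable under /flash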
735de8167d3e6b0085710d406c8c2976913baa43
|
tests/test_spider_mw.py
|
tests/test_spider_mw.py
|
import logging
from testfixtures import LogCapture
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from scrapy import Spider, Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class CommonTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def tearDownClass(cls):
cls.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def crawl_log(self, spider):
crawler = get_crawler(spider)
with LogCapture() as log:
yield crawler.crawl()
raise defer.returnValue(log)
class LogExceptionMiddleware(object):
def process_spider_exception(self, response, exception, spider):
logging.warn('Middleware: %s exception caught', exception.__class__.__name__)
return None
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class ProcessSpiderInputSpider(Spider):
name = 'ProcessSpiderInputSpider'
custom_settings = {
'SPIDER_MIDDLEWARES': {
# spider
__name__ + '.LogExceptionMiddleware': 10,
__name__ + '.FailProcessSpiderInputMiddleware': 8,
            __name__ + '.LogExceptionMiddleware': 6,  # NOTE: duplicate dict key, this overrides the priority-10 entry above, so only one LogExceptionMiddleware is installed
# engine
}
}
def start_requests(self):
yield Request('http://localhost:8998', callback=self.parse, errback=self.errback)
def parse(self, response):
return [{'test': 1}, {'test': 2}]
def errback(self, failure):
self.logger.warn('Got a Failure on the Request errback')
class FailProcessSpiderInputMiddleware:
def process_spider_input(self, response, spider):
logging.warn('Middleware: will raise ZeroDivisionError')
raise ZeroDivisionError()
class TestProcessSpiderInputSpider(CommonTestCase):
@defer.inlineCallbacks
def test_process_spider_input_errback(self):
"""
(1) An exception from the process_spider_input chain should not be caught by the
process_spider_exception chain, it should go directly to the Request errback
"""
log = yield self.crawl_log(ProcessSpiderInputSpider)
self.assertNotIn('Middleware: ZeroDivisionError exception caught', str(log))
self.assertIn('Middleware: will raise ZeroDivisionError', str(log))
self.assertIn('Got a Failure on the Request errback', str(log))
|
Test for exceptions on process_spider_input
|
Test for exceptions on process_spider_input
|
Python
|
bsd-3-clause
|
pablohoffman/scrapy,wujuguang/scrapy,finfish/scrapy,eLRuLL/scrapy,starrify/scrapy,starrify/scrapy,eLRuLL/scrapy,eLRuLL/scrapy,wujuguang/scrapy,pawelmhm/scrapy,pawelmhm/scrapy,wujuguang/scrapy,pablohoffman/scrapy,elacuesta/scrapy,scrapy/scrapy,elacuesta/scrapy,finfish/scrapy,pablohoffman/scrapy,dangra/scrapy,scrapy/scrapy,pawelmhm/scrapy,elacuesta/scrapy,dangra/scrapy,starrify/scrapy,scrapy/scrapy,dangra/scrapy,finfish/scrapy
|
Test for exceptions on process_spider_input
|
import logging
from testfixtures import LogCapture
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from scrapy import Spider, Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class CommonTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def tearDownClass(cls):
cls.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def crawl_log(self, spider):
crawler = get_crawler(spider)
with LogCapture() as log:
yield crawler.crawl()
raise defer.returnValue(log)
class LogExceptionMiddleware(object):
def process_spider_exception(self, response, exception, spider):
logging.warn('Middleware: %s exception caught', exception.__class__.__name__)
return None
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class ProcessSpiderInputSpider(Spider):
name = 'ProcessSpiderInputSpider'
custom_settings = {
'SPIDER_MIDDLEWARES': {
# spider
__name__ + '.LogExceptionMiddleware': 10,
__name__ + '.FailProcessSpiderInputMiddleware': 8,
            __name__ + '.LogExceptionMiddleware': 6,  # NOTE: duplicate dict key, this overrides the priority-10 entry above, so only one LogExceptionMiddleware is installed
# engine
}
}
def start_requests(self):
yield Request('http://localhost:8998', callback=self.parse, errback=self.errback)
def parse(self, response):
return [{'test': 1}, {'test': 2}]
def errback(self, failure):
self.logger.warn('Got a Failure on the Request errback')
class FailProcessSpiderInputMiddleware:
def process_spider_input(self, response, spider):
logging.warn('Middleware: will raise ZeroDivisionError')
raise ZeroDivisionError()
class TestProcessSpiderInputSpider(CommonTestCase):
@defer.inlineCallbacks
def test_process_spider_input_errback(self):
"""
(1) An exception from the process_spider_input chain should not be caught by the
process_spider_exception chain, it should go directly to the Request errback
"""
log = yield self.crawl_log(ProcessSpiderInputSpider)
self.assertNotIn('Middleware: ZeroDivisionError exception caught', str(log))
self.assertIn('Middleware: will raise ZeroDivisionError', str(log))
self.assertIn('Got a Failure on the Request errback', str(log))
|
<commit_before><commit_msg>Test for exceptions on process_spider_input<commit_after>
|
import logging
from testfixtures import LogCapture
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from scrapy import Spider, Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class CommonTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def tearDownClass(cls):
cls.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def crawl_log(self, spider):
crawler = get_crawler(spider)
with LogCapture() as log:
yield crawler.crawl()
raise defer.returnValue(log)
class LogExceptionMiddleware(object):
def process_spider_exception(self, response, exception, spider):
logging.warn('Middleware: %s exception caught', exception.__class__.__name__)
return None
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class ProcessSpiderInputSpider(Spider):
name = 'ProcessSpiderInputSpider'
custom_settings = {
'SPIDER_MIDDLEWARES': {
# spider
__name__ + '.LogExceptionMiddleware': 10,
__name__ + '.FailProcessSpiderInputMiddleware': 8,
            __name__ + '.LogExceptionMiddleware': 6,  # NOTE: duplicate dict key, this overrides the priority-10 entry above, so only one LogExceptionMiddleware is installed
# engine
}
}
def start_requests(self):
yield Request('http://localhost:8998', callback=self.parse, errback=self.errback)
def parse(self, response):
return [{'test': 1}, {'test': 2}]
def errback(self, failure):
self.logger.warn('Got a Failure on the Request errback')
class FailProcessSpiderInputMiddleware:
def process_spider_input(self, response, spider):
logging.warn('Middleware: will raise ZeroDivisionError')
raise ZeroDivisionError()
class TestProcessSpiderInputSpider(CommonTestCase):
@defer.inlineCallbacks
def test_process_spider_input_errback(self):
"""
(1) An exception from the process_spider_input chain should not be caught by the
process_spider_exception chain, it should go directly to the Request errback
"""
log = yield self.crawl_log(ProcessSpiderInputSpider)
self.assertNotIn('Middleware: ZeroDivisionError exception caught', str(log))
self.assertIn('Middleware: will raise ZeroDivisionError', str(log))
self.assertIn('Got a Failure on the Request errback', str(log))
|
Test for exceptions on process_spider_input
import logging
from testfixtures import LogCapture
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from scrapy import Spider, Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class CommonTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def tearDownClass(cls):
cls.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def crawl_log(self, spider):
crawler = get_crawler(spider)
with LogCapture() as log:
yield crawler.crawl()
raise defer.returnValue(log)
class LogExceptionMiddleware(object):
def process_spider_exception(self, response, exception, spider):
logging.warn('Middleware: %s exception caught', exception.__class__.__name__)
return None
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class ProcessSpiderInputSpider(Spider):
name = 'ProcessSpiderInputSpider'
custom_settings = {
'SPIDER_MIDDLEWARES': {
# spider
__name__ + '.LogExceptionMiddleware': 10,
__name__ + '.FailProcessSpiderInputMiddleware': 8,
            __name__ + '.LogExceptionMiddleware': 6,  # NOTE: duplicate dict key, this overrides the priority-10 entry above, so only one LogExceptionMiddleware is installed
# engine
}
}
def start_requests(self):
yield Request('http://localhost:8998', callback=self.parse, errback=self.errback)
def parse(self, response):
return [{'test': 1}, {'test': 2}]
def errback(self, failure):
self.logger.warn('Got a Failure on the Request errback')
class FailProcessSpiderInputMiddleware:
def process_spider_input(self, response, spider):
logging.warn('Middleware: will raise ZeroDivisionError')
raise ZeroDivisionError()
class TestProcessSpiderInputSpider(CommonTestCase):
@defer.inlineCallbacks
def test_process_spider_input_errback(self):
"""
(1) An exception from the process_spider_input chain should not be caught by the
process_spider_exception chain, it should go directly to the Request errback
"""
log = yield self.crawl_log(ProcessSpiderInputSpider)
self.assertNotIn('Middleware: ZeroDivisionError exception caught', str(log))
self.assertIn('Middleware: will raise ZeroDivisionError', str(log))
self.assertIn('Got a Failure on the Request errback', str(log))
|
<commit_before><commit_msg>Test for exceptions on process_spider_input<commit_after>
import logging
from testfixtures import LogCapture
from twisted.trial.unittest import TestCase
from twisted.internet import defer
from scrapy import Spider, Request
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class CommonTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def tearDownClass(cls):
cls.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def crawl_log(self, spider):
crawler = get_crawler(spider)
with LogCapture() as log:
yield crawler.crawl()
raise defer.returnValue(log)
class LogExceptionMiddleware(object):
def process_spider_exception(self, response, exception, spider):
logging.warn('Middleware: %s exception caught', exception.__class__.__name__)
return None
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class ProcessSpiderInputSpider(Spider):
name = 'ProcessSpiderInputSpider'
custom_settings = {
'SPIDER_MIDDLEWARES': {
# spider
__name__ + '.LogExceptionMiddleware': 10,
__name__ + '.FailProcessSpiderInputMiddleware': 8,
            __name__ + '.LogExceptionMiddleware': 6,  # NOTE: duplicate dict key, this overrides the priority-10 entry above, so only one LogExceptionMiddleware is installed
# engine
}
}
def start_requests(self):
yield Request('http://localhost:8998', callback=self.parse, errback=self.errback)
def parse(self, response):
return [{'test': 1}, {'test': 2}]
def errback(self, failure):
self.logger.warn('Got a Failure on the Request errback')
class FailProcessSpiderInputMiddleware:
def process_spider_input(self, response, spider):
logging.warn('Middleware: will raise ZeroDivisionError')
raise ZeroDivisionError()
class TestProcessSpiderInputSpider(CommonTestCase):
@defer.inlineCallbacks
def test_process_spider_input_errback(self):
"""
(1) An exception from the process_spider_input chain should not be caught by the
process_spider_exception chain, it should go directly to the Request errback
"""
log = yield self.crawl_log(ProcessSpiderInputSpider)
self.assertNotIn('Middleware: ZeroDivisionError exception caught', str(log))
self.assertIn('Middleware: will raise ZeroDivisionError', str(log))
self.assertIn('Got a Failure on the Request errback', str(log))
|
|
84494fc4db847523a88b70f8cd1c93c10b689ad1
|
chacha.py
|
chacha.py
|
#!/usr/bin/env python
import argparse
import atexit
import logging
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
DOCKER_BASE_URL_HELP = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
DOCKER_API_VERSION_HELP = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
DOCKER_HTTP_TIMEOUT_HELP = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
def _exit():
logging.shutdown()
def main():
atexit.register(func=_exit)
parser = argparse.ArgumentParser(description='Clean old docker images')
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=DOCKER_BASE_URL_HELP, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=DOCKER_API_VERSION_HELP, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=DOCKER_HTTP_TIMEOUT_HELP, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
main()
|
Add docker client options to argparse
|
Add docker client options to argparse
All docker-py client options are handled through argparse, except the
ssl options.
|
Python
|
mit
|
aleasoluciones/docker-image-cleaner,aebm/docker-image-cleaner
|
Add docker client options to argparse
All docker-py client options are handled through argparse, except the
ssl options.
|
#!/usr/bin/env python
import argparse
import atexit
import logging
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
DOCKER_BASE_URL_HELP = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
DOCKER_API_VERSION_HELP = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
DOCKER_HTTP_TIMEOUT_HELP = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
def _exit():
logging.shutdown()
def main():
atexit.register(func=_exit)
parser = argparse.ArgumentParser(description='Clean old docker images')
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=DOCKER_BASE_URL_HELP, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=DOCKER_API_VERSION_HELP, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=DOCKER_HTTP_TIMEOUT_HELP, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add docker client options to argparse
All docker-py client options are handled through argparse, except the
ssl options.<commit_after>
|
#!/usr/bin/env python
import argparse
import atexit
import logging
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
DOCKER_BASE_URL_HELP = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
DOCKER_API_VERSION_HELP = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
DOCKER_HTTP_TIMEOUT_HELP = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
def _exit():
logging.shutdown()
def main():
atexit.register(func=_exit)
parser = argparse.ArgumentParser(description='Clean old docker images')
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=DOCKER_BASE_URL_HELP, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=DOCKER_API_VERSION_HELP, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=DOCKER_HTTP_TIMEOUT_HELP, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
main()
|
Add docker client options to argparse
All docker-py client options are handled through argparse, except the
ssl options.#!/usr/bin/env python
import argparse
import atexit
import logging
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
DOCKER_BASE_URL_HELP = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
DOCKER_API_VERSION_HELP = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
DOCKER_HTTP_TIMEOUT_HELP = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
def _exit():
logging.shutdown()
def main():
atexit.register(func=_exit)
parser = argparse.ArgumentParser(description='Clean old docker images')
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=DOCKER_BASE_URL_HELP, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=DOCKER_API_VERSION_HELP, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=DOCKER_HTTP_TIMEOUT_HELP, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add docker client options to argparse
All docker-py client options are handled through argparse, except the
ssl options.<commit_after>#!/usr/bin/env python
import argparse
import atexit
import logging
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
DOCKER_BASE_URL_HELP = ('Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
DOCKER_API_VERSION_HELP = ('The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
DOCKER_HTTP_TIMEOUT_HELP = ('The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
def _exit():
logging.shutdown()
def main():
atexit.register(func=_exit)
parser = argparse.ArgumentParser(description='Clean old docker images')
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument('--base-url', help=DOCKER_BASE_URL_HELP, default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument('--api-version', help=DOCKER_API_VERSION_HELP, default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument('--http-timeout', help=DOCKER_HTTP_TIMEOUT_HELP, default=DEFAULT_DOCKER_HTTP_TIMEOUT, type=int)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
main()
|
|
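One plausible way the parsed options above could be handed to a docker-py client; the exact client class depends on the installed docker-py version, so APIClient here is an assumption rather than something taken from the commit:
import docker
def make_client(args):
    # version='auto' lets docker-py negotiate the API version with the daemon
    return docker.APIClient(base_url=args.base_url,
                            version=args.api_version,
                            timeout=args.http_timeout)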
16e27029f32ae0329a7da022efd06625987d1622
|
itunes.py
|
itunes.py
|
#!/usr/bin/env python3
from collections import defaultdict
from dateutil.parser import parse
from pathlib import Path
import xml.etree.ElementTree as ET
LIBRARY = Path.home() / "Music/iTunes/iTunes Library.xml"
def plist_iter(iterable, all_dicts=False):
a = iter(iterable)
for k, v in zip(a, a):
assert k.tag == "key"
if all_dicts:
if v.tag != "dict":
print(f"For key {k.text}, not dict but {v.tag}")
assert v.tag == "dict"
yield k.text, v
def extract_songs(tree):
root = tree.getroot()[0]
tracks = None
for key, node in plist_iter(root):
if key == "Tracks":
tracks = node
songs = []
for key, node in plist_iter(tracks, all_dicts=True):
is_music = False
song = {}
for k, n in plist_iter(node):
if k == "Kind":
if "audio" in n.text:
is_music = True
else:
break
elif k in ("Podcast", "Movie", "Audiobooks"):
is_music = False
break
elif k == "Play Count":
song["play_count"] = int(n.text)
elif k == "Date Added":
song["date_added"] = parse(n.text)
elif k == "Name":
if "wcpe" in n.text.lower():
is_music = False
break
song["name"] = n.text
elif k == "Album":
song["album"] = n.text
elif k == "Artist":
song["artist"] = n.text
if is_music:
songs.append(song)
return songs
def make_playlists(songs):
by_year = defaultdict(list)
for song in songs:
if "date_added" in song and "play_count" in song:
by_year[song["date_added"].year].append(song)
for _, song_list in by_year.items():
song_list.sort(key=lambda s: s["play_count"], reverse=True)
return by_year
def print_top_songs(playlists):
for year in range(2010, 2020):
print(f"{year}\n====")
for i, song in enumerate(playlists[year][:25]):
play_count = song["play_count"]
name = song.get("name", "Unknown name")
artist = song.get("artist", "Unknown artist")
album = song.get("album", "Unknown album")
print(f"{i+1}. [{play_count}] {name} | {artist} | {album}")
print("\n\n")
def main():
tree = ET.parse(LIBRARY)
songs = extract_songs(tree)
playlists = make_playlists(songs)
print_top_songs(playlists)
if __name__ == "__main__":
main()
|
Add iTunes script for making year playlists
|
Add iTunes script for making year playlists
|
Python
|
mit
|
mk12/scripts,mk12/scripts,mk12/scripts,mk12/scripts
|
Add iTunes script for making year playlists
|
#!/usr/bin/env python3
from collections import defaultdict
from dateutil.parser import parse
from pathlib import Path
import xml.etree.ElementTree as ET
LIBRARY = Path.home() / "Music/iTunes/iTunes Library.xml"
def plist_iter(iterable, all_dicts=False):
a = iter(iterable)
for k, v in zip(a, a):
assert k.tag == "key"
if all_dicts:
if v.tag != "dict":
print(f"For key {k.text}, not dict but {v.tag}")
assert v.tag == "dict"
yield k.text, v
def extract_songs(tree):
root = tree.getroot()[0]
tracks = None
for key, node in plist_iter(root):
if key == "Tracks":
tracks = node
songs = []
for key, node in plist_iter(tracks, all_dicts=True):
is_music = False
song = {}
for k, n in plist_iter(node):
if k == "Kind":
if "audio" in n.text:
is_music = True
else:
break
elif k in ("Podcast", "Movie", "Audiobooks"):
is_music = False
break
elif k == "Play Count":
song["play_count"] = int(n.text)
elif k == "Date Added":
song["date_added"] = parse(n.text)
elif k == "Name":
if "wcpe" in n.text.lower():
is_music = False
break
song["name"] = n.text
elif k == "Album":
song["album"] = n.text
elif k == "Artist":
song["artist"] = n.text
if is_music:
songs.append(song)
return songs
def make_playlists(songs):
by_year = defaultdict(list)
for song in songs:
if "date_added" in song and "play_count" in song:
by_year[song["date_added"].year].append(song)
for _, song_list in by_year.items():
song_list.sort(key=lambda s: s["play_count"], reverse=True)
return by_year
def print_top_songs(playlists):
for year in range(2010, 2020):
print(f"{year}\n====")
for i, song in enumerate(playlists[year][:25]):
play_count = song["play_count"]
name = song.get("name", "Unknown name")
artist = song.get("artist", "Unknown artist")
album = song.get("album", "Unknown album")
print(f"{i+1}. [{play_count}] {name} | {artist} | {album}")
print("\n\n")
def main():
tree = ET.parse(LIBRARY)
songs = extract_songs(tree)
playlists = make_playlists(songs)
print_top_songs(playlists)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add iTunes script for making year playlists<commit_after>
|
#!/usr/bin/env python3
from collections import defaultdict
from dateutil.parser import parse
from pathlib import Path
import xml.etree.ElementTree as ET
LIBRARY = Path.home() / "Music/iTunes/iTunes Library.xml"
def plist_iter(iterable, all_dicts=False):
a = iter(iterable)
for k, v in zip(a, a):
assert k.tag == "key"
if all_dicts:
if v.tag != "dict":
print(f"For key {k.text}, not dict but {v.tag}")
assert v.tag == "dict"
yield k.text, v
def extract_songs(tree):
root = tree.getroot()[0]
tracks = None
for key, node in plist_iter(root):
if key == "Tracks":
tracks = node
songs = []
for key, node in plist_iter(tracks, all_dicts=True):
is_music = False
song = {}
for k, n in plist_iter(node):
if k == "Kind":
if "audio" in n.text:
is_music = True
else:
break
elif k in ("Podcast", "Movie", "Audiobooks"):
is_music = False
break
elif k == "Play Count":
song["play_count"] = int(n.text)
elif k == "Date Added":
song["date_added"] = parse(n.text)
elif k == "Name":
if "wcpe" in n.text.lower():
is_music = False
break
song["name"] = n.text
elif k == "Album":
song["album"] = n.text
elif k == "Artist":
song["artist"] = n.text
if is_music:
songs.append(song)
return songs
def make_playlists(songs):
by_year = defaultdict(list)
for song in songs:
if "date_added" in song and "play_count" in song:
by_year[song["date_added"].year].append(song)
for _, song_list in by_year.items():
song_list.sort(key=lambda s: s["play_count"], reverse=True)
return by_year
def print_top_songs(playlists):
for year in range(2010, 2020):
print(f"{year}\n====")
for i, song in enumerate(playlists[year][:25]):
play_count = song["play_count"]
name = song.get("name", "Unknown name")
artist = song.get("artist", "Unknown artist")
album = song.get("album", "Unknown album")
print(f"{i+1}. [{play_count}] {name} | {artist} | {album}")
print("\n\n")
def main():
tree = ET.parse(LIBRARY)
songs = extract_songs(tree)
playlists = make_playlists(songs)
print_top_songs(playlists)
if __name__ == "__main__":
main()
|
Add iTunes script for making year playlists#!/usr/bin/env python3
from collections import defaultdict
from dateutil.parser import parse
from pathlib import Path
import xml.etree.ElementTree as ET
LIBRARY = Path.home() / "Music/iTunes/iTunes Library.xml"
def plist_iter(iterable, all_dicts=False):
a = iter(iterable)
for k, v in zip(a, a):
assert k.tag == "key"
if all_dicts:
if v.tag != "dict":
print(f"For key {k.text}, not dict but {v.tag}")
assert v.tag == "dict"
yield k.text, v
def extract_songs(tree):
root = tree.getroot()[0]
tracks = None
for key, node in plist_iter(root):
if key == "Tracks":
tracks = node
songs = []
for key, node in plist_iter(tracks, all_dicts=True):
is_music = False
song = {}
for k, n in plist_iter(node):
if k == "Kind":
if "audio" in n.text:
is_music = True
else:
break
elif k in ("Podcast", "Movie", "Audiobooks"):
is_music = False
break
elif k == "Play Count":
song["play_count"] = int(n.text)
elif k == "Date Added":
song["date_added"] = parse(n.text)
elif k == "Name":
if "wcpe" in n.text.lower():
is_music = False
break
song["name"] = n.text
elif k == "Album":
song["album"] = n.text
elif k == "Artist":
song["artist"] = n.text
if is_music:
songs.append(song)
return songs
def make_playlists(songs):
by_year = defaultdict(list)
for song in songs:
if "date_added" in song and "play_count" in song:
by_year[song["date_added"].year].append(song)
for _, song_list in by_year.items():
song_list.sort(key=lambda s: s["play_count"], reverse=True)
return by_year
def print_top_songs(playlists):
for year in range(2010, 2020):
print(f"{year}\n====")
for i, song in enumerate(playlists[year][:25]):
play_count = song["play_count"]
name = song.get("name", "Unknown name")
artist = song.get("artist", "Unknown artist")
album = song.get("album", "Unknown album")
print(f"{i+1}. [{play_count}] {name} | {artist} | {album}")
print("\n\n")
def main():
tree = ET.parse(LIBRARY)
songs = extract_songs(tree)
playlists = make_playlists(songs)
print_top_songs(playlists)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add iTunes script for making year playlists<commit_after>#!/usr/bin/env python3
from collections import defaultdict
from dateutil.parser import parse
from pathlib import Path
import xml.etree.ElementTree as ET
LIBRARY = Path.home() / "Music/iTunes/iTunes Library.xml"
def plist_iter(iterable, all_dicts=False):
a = iter(iterable)
for k, v in zip(a, a):
assert k.tag == "key"
if all_dicts:
if v.tag != "dict":
print(f"For key {k.text}, not dict but {v.tag}")
assert v.tag == "dict"
yield k.text, v
def extract_songs(tree):
root = tree.getroot()[0]
tracks = None
for key, node in plist_iter(root):
if key == "Tracks":
tracks = node
songs = []
for key, node in plist_iter(tracks, all_dicts=True):
is_music = False
song = {}
for k, n in plist_iter(node):
if k == "Kind":
if "audio" in n.text:
is_music = True
else:
break
elif k in ("Podcast", "Movie", "Audiobooks"):
is_music = False
break
elif k == "Play Count":
song["play_count"] = int(n.text)
elif k == "Date Added":
song["date_added"] = parse(n.text)
elif k == "Name":
if "wcpe" in n.text.lower():
is_music = False
break
song["name"] = n.text
elif k == "Album":
song["album"] = n.text
elif k == "Artist":
song["artist"] = n.text
if is_music:
songs.append(song)
return songs
def make_playlists(songs):
by_year = defaultdict(list)
for song in songs:
if "date_added" in song and "play_count" in song:
by_year[song["date_added"].year].append(song)
for _, song_list in by_year.items():
song_list.sort(key=lambda s: s["play_count"], reverse=True)
return by_year
def print_top_songs(playlists):
for year in range(2010, 2020):
print(f"{year}\n====")
for i, song in enumerate(playlists[year][:25]):
play_count = song["play_count"]
name = song.get("name", "Unknown name")
artist = song.get("artist", "Unknown artist")
album = song.get("album", "Unknown album")
print(f"{i+1}. [{play_count}] {name} | {artist} | {album}")
print("\n\n")
def main():
tree = ET.parse(LIBRARY)
songs = extract_songs(tree)
playlists = make_playlists(songs)
print_top_songs(playlists)
if __name__ == "__main__":
main()
|
|
0bc2a9851f695418ce99d6a7b83fcb7843a28f88
|
osf/migrations/0043_set_share_title.py
|
osf/migrations/0043_set_share_title.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-03 15:07
"""Sets the share_title field on production PreprintProviders. Makes no
updates if the listed providers don't exist in the current environment.
"""
from __future__ import unicode_literals
from django.db import migrations
# _id => share_title
SHARE_TITLES = {
'osf': 'OSF',
'lawarxiv': 'LawArXiv',
'mindrxiv': 'MindRxiv',
'bitss': 'BITSS',
'agrixiv': 'AgriXiv',
'engrxiv': 'engrXiv',
'lissa': 'LIS Scholarship Archive',
'psyarxiv': 'PsyArXiv',
'socarxiv': 'SocArXiv',
}
def set_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
for provider in PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())):
provider.share_title = SHARE_TITLES[provider._id]
provider.save()
def unset_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())).update(share_title='')
class Migration(migrations.Migration):
dependencies = [
('osf', '0042_preprintprovider_share_title'),
]
operations = [
migrations.RunPython(
set_share_titles, unset_share_titles
),
]
|
Add migration to set share_title for PreprintProviders on prod
|
Add migration to set share_title for PreprintProviders on prod
|
Python
|
apache-2.0
|
leb2dg/osf.io,mattclark/osf.io,saradbowman/osf.io,cslzchen/osf.io,binoculars/osf.io,caneruguz/osf.io,erinspace/osf.io,sloria/osf.io,caseyrollins/osf.io,baylee-d/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,adlius/osf.io,adlius/osf.io,pattisdr/osf.io,leb2dg/osf.io,chennan47/osf.io,aaxelb/osf.io,binoculars/osf.io,brianjgeiger/osf.io,adlius/osf.io,felliott/osf.io,icereval/osf.io,mfraezz/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,crcresearch/osf.io,saradbowman/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,crcresearch/osf.io,baylee-d/osf.io,mfraezz/osf.io,felliott/osf.io,HalcyonChimera/osf.io,icereval/osf.io,sloria/osf.io,pattisdr/osf.io,erinspace/osf.io,leb2dg/osf.io,icereval/osf.io,felliott/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,erinspace/osf.io,mattclark/osf.io,felliott/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,adlius/osf.io,chrisseto/osf.io,mattclark/osf.io,cslzchen/osf.io,mfraezz/osf.io,chennan47/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,chennan47/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,TomBaxter/osf.io,baylee-d/osf.io,leb2dg/osf.io,mfraezz/osf.io,sloria/osf.io,chrisseto/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io
|
Add migration to set share_title for PreprintProviders on prod
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-03 15:07
"""Sets the share_title field on production PreprintProviders. Makes no
updates if the listed providers don't exist in the current environment.
"""
from __future__ import unicode_literals
from django.db import migrations
# _id => share_title
SHARE_TITLES = {
'osf': 'OSF',
'lawarxiv': 'LawArXiv',
'mindrxiv': 'MindRxiv',
'bitss': 'BITSS',
'agrixiv': 'AgriXiv',
'engrxiv': 'engrXiv',
'lissa': 'LIS Scholarship Archive',
'psyarxiv': 'PsyArXiv',
'socarxiv': 'SocArXiv',
}
def set_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
for provider in PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())):
provider.share_title = SHARE_TITLES[provider._id]
provider.save()
def unset_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())).update(share_title='')
class Migration(migrations.Migration):
dependencies = [
('osf', '0042_preprintprovider_share_title'),
]
operations = [
migrations.RunPython(
set_share_titles, unset_share_titles
),
]
|
<commit_before><commit_msg>Add migration to set share_title for PreprintProviders on prod<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-03 15:07
"""Sets the share_title field on production PreprintProviders. Makes no
updates if the listed providers don't exist in the current environment.
"""
from __future__ import unicode_literals
from django.db import migrations
# _id => share_title
SHARE_TITLES = {
'osf': 'OSF',
'lawarxiv': 'LawArXiv',
'mindrxiv': 'MindRxiv',
'bitss': 'BITSS',
'agrixiv': 'AgriXiv',
'engrxiv': 'engrXiv',
'lissa': 'LIS Scholarship Archive',
'psyarxiv': 'PsyArXiv',
'socarxiv': 'SocArXiv',
}
def set_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
for provider in PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())):
provider.share_title = SHARE_TITLES[provider._id]
provider.save()
def unset_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())).update(share_title='')
class Migration(migrations.Migration):
dependencies = [
('osf', '0042_preprintprovider_share_title'),
]
operations = [
migrations.RunPython(
set_share_titles, unset_share_titles
),
]
|
Add migration to set share_title for PreprintProviders on prod# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-03 15:07
"""Sets the share_title field on production PreprintProviders. Makes no
updates if the listed providers don't exist in the current environment.
"""
from __future__ import unicode_literals
from django.db import migrations
# _id => share_title
SHARE_TITLES = {
'osf': 'OSF',
'lawarxiv': 'LawArXiv',
'mindrxiv': 'MindRxiv',
'bitss': 'BITSS',
'agrixiv': 'AgriXiv',
'engrxiv': 'engrXiv',
'lissa': 'LIS Scholarship Archive',
'psyarxiv': 'PsyArXiv',
'socarxiv': 'SocArXiv',
}
def set_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
for provider in PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())):
provider.share_title = SHARE_TITLES[provider._id]
provider.save()
def unset_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())).update(share_title='')
class Migration(migrations.Migration):
dependencies = [
('osf', '0042_preprintprovider_share_title'),
]
operations = [
migrations.RunPython(
set_share_titles, unset_share_titles
),
]
|
<commit_before><commit_msg>Add migration to set share_title for PreprintProviders on prod<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-03 15:07
"""Sets the share_title field on production PreprintProviders. Makes no
updates if the listed providers don't exist in the current environment.
"""
from __future__ import unicode_literals
from django.db import migrations
# _id => share_title
SHARE_TITLES = {
'osf': 'OSF',
'lawarxiv': 'LawArXiv',
'mindrxiv': 'MindRxiv',
'bitss': 'BITSS',
'agrixiv': 'AgriXiv',
'engrxiv': 'engrXiv',
'lissa': 'LIS Scholarship Archive',
'psyarxiv': 'PsyArXiv',
'socarxiv': 'SocArXiv',
}
def set_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
for provider in PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())):
provider.share_title = SHARE_TITLES[provider._id]
provider.save()
def unset_share_titles(state, *args, **kwargs):
PreprintProvider = state.get_model('osf', 'preprintprovider')
PreprintProvider.objects.filter(_id__in=list(SHARE_TITLES.keys())).update(share_title='')
class Migration(migrations.Migration):
dependencies = [
('osf', '0042_preprintprovider_share_title'),
]
operations = [
migrations.RunPython(
set_share_titles, unset_share_titles
),
]
|
|
82fa4a85a1171fd62cab68f90ec7e8ab8fbbc862
|
tools/detect_multiple_usage.py
|
tools/detect_multiple_usage.py
|
# JN 2016-04-08
"""
First read do_sort_neg.txt, CheetahLogFile_*.csv,
and channel_names.csv.
Then check if any channel in do_sort_neg.txt
is using the same reference.
Warning: This assumes that all channels have been renamed
to the CSCxy.ncs format
"""
from __future__ import print_function, division, absolute_import
import os
import glob
import csv
def main(fname_sort_neg, fname_logfile, fname_channels):
"""
parse the files and check for problems
"""
# read channel names
with open(fname_channels, 'r') as fid:
names = dict(csv.reader(fid, delimiter=';'))
# transform to integer based names
int_names = {int(fname[3:]): names[fname] for fname in names}
# read references
with open(fname_logfile, 'r') as fid:
refs = list((csv.reader(fid, delimiter=',')))
name_refs = {}
for item in refs:
if len(item) == 4:
name_refs[item[1]] = item[2]
# read proposed sorting
with open(fname_sort_neg, 'r') as fid:
job_channels = [int(os.path.basename(line.strip())[8:-3])
for line in fid.readlines()]
used_refs = set()
for chan in job_channels:
print('{} (CSC{}) is referenced to {}'.
format(int_names[chan], chan, name_refs[int_names[chan]]))
used_refs.add(name_refs[int_names[chan]])
print('The {} channels in {} use the following {} references: {}'.
format(len(job_channels), fname_sort_neg,
len(used_refs), sorted(used_refs)))
if __name__ == "__main__":
fname_logfile = glob.glob("CheetahLogFile_*.csv")[0]
fname_channels = "do_sort_neg.txt"
fname_channel_names = "channel_names.csv"
print("Checking {} for double references.".format(fname_channels))
print("Using {} and {}".format(fname_logfile, fname_channel_names))
main(fname_channels, fname_logfile, fname_channel_names)
|
Add tool to detect multiple use of a negative unit
|
Add tool to detect multiple use of a negative unit
The script tools/detect_multiple_usage.py automatically parses
'do_sort_neg.txt' and checks whether the same reference channel
occurs more than once.
|
Python
|
mit
|
jniediek/combinato
|
Add tool to detect multiple use of a negative unit
The script tools/detect_multiple_usage.py automatically parses
'do_sort_neg.txt' and checks whether the same reference channel
occurs more than once.
|
# JN 2016-04-08
"""
First read do_sort_neg.txt, CheetahLogFile_*.csv,
and channel_names.csv.
Then check if any channel in do_sort_neg.txt
is using the same reference.
Warning: This assumes that all channels have been renamed
to the CSCxy.ncs format
"""
from __future__ import print_function, division, absolute_import
import os
import glob
import csv
def main(fname_sort_neg, fname_logfile, fname_channels):
"""
parse the files and check for problems
"""
# read channel names
with open(fname_channels, 'r') as fid:
names = dict(csv.reader(fid, delimiter=';'))
# transform to integer based names
int_names = {int(fname[3:]): names[fname] for fname in names}
# read references
with open(fname_logfile, 'r') as fid:
refs = list((csv.reader(fid, delimiter=',')))
name_refs = {}
for item in refs:
if len(item) == 4:
name_refs[item[1]] = item[2]
# read proposed sorting
with open(fname_sort_neg, 'r') as fid:
job_channels = [int(os.path.basename(line.strip())[8:-3])
for line in fid.readlines()]
used_refs = set()
for chan in job_channels:
print('{} (CSC{}) is referenced to {}'.
format(int_names[chan], chan, name_refs[int_names[chan]]))
used_refs.add(name_refs[int_names[chan]])
print('The {} channels in {} use the following {} references: {}'.
format(len(job_channels), fname_sort_neg,
len(used_refs), sorted(used_refs)))
if __name__ == "__main__":
fname_logfile = glob.glob("CheetahLogFile_*.csv")[0]
fname_channels = "do_sort_neg.txt"
fname_channel_names = "channel_names.csv"
print("Checking {} for double references.".format(fname_channels))
print("Using {} and {}".format(fname_logfile, fname_channel_names))
main(fname_channels, fname_logfile, fname_channel_names)
|
<commit_before><commit_msg>Add tool to detect multiple use of a negative unit
The script tools/detect_multiple_usage.py automatically parses
'do_sort_neg.txt' and checks whether the same reference channel
occurs more than once.<commit_after>
|
# JN 2016-04-08
"""
First read do_sort_neg.txt, CheetahLogFile_*.csv,
and channel_names.csv.
Then check if any channel in do_sort_neg.txt
is using the same reference.
Warning: This assumes that all channels have been renamed
to the CSCxy.ncs format
"""
from __future__ import print_function, division, absolute_import
import os
import glob
import csv
def main(fname_sort_neg, fname_logfile, fname_channels):
"""
parse the files and check for problems
"""
# read channel names
with open(fname_channels, 'r') as fid:
names = dict(csv.reader(fid, delimiter=';'))
# transform to integer based names
int_names = {int(fname[3:]): names[fname] for fname in names}
# read references
with open(fname_logfile, 'r') as fid:
refs = list((csv.reader(fid, delimiter=',')))
name_refs = {}
for item in refs:
if len(item) == 4:
name_refs[item[1]] = item[2]
# read proposed sorting
with open(fname_sort_neg, 'r') as fid:
job_channels = [int(os.path.basename(line.strip())[8:-3])
for line in fid.readlines()]
used_refs = set()
for chan in job_channels:
print('{} (CSC{}) is referenced to {}'.
format(int_names[chan], chan, name_refs[int_names[chan]]))
used_refs.add(name_refs[int_names[chan]])
print('The {} channels in {} use the following {} references: {}'.
format(len(job_channels), fname_sort_neg,
len(used_refs), sorted(used_refs)))
if __name__ == "__main__":
fname_logfile = glob.glob("CheetahLogFile_*.csv")[0]
fname_channels = "do_sort_neg.txt"
fname_channel_names = "channel_names.csv"
print("Checking {} for double references.".format(fname_channels))
print("Using {} and {}".format(fname_logfile, fname_channel_names))
main(fname_channels, fname_logfile, fname_channel_names)
|
Add tool to detect multiple use of a negative unit
The script tools/detect_multiple_usage.py automatically parses
'do_sort_neg.txt' and checks whether the same reference channel
occurs more than once.# JN 2016-04-08
"""
First read do_sort_neg.txt, CheetahLogFile_*.csv,
and channel_names.csv.
Then check if any channel in do_sort_neg.txt
is using the same reference.
Warning: This assumes that all channels have been renamed
to the CSCxy.ncs format
"""
from __future__ import print_function, division, absolute_import
import os
import glob
import csv
def main(fname_sort_neg, fname_logfile, fname_channels):
"""
parse the files and check for problems
"""
# read channel names
with open(fname_channels, 'r') as fid:
names = dict(csv.reader(fid, delimiter=';'))
# transform to integer based names
int_names = {int(fname[3:]): names[fname] for fname in names}
# read references
with open(fname_logfile, 'r') as fid:
refs = list((csv.reader(fid, delimiter=',')))
name_refs = {}
for item in refs:
if len(item) == 4:
name_refs[item[1]] = item[2]
# read proposed sorting
with open(fname_sort_neg, 'r') as fid:
job_channels = [int(os.path.basename(line.strip())[8:-3])
for line in fid.readlines()]
used_refs = set()
for chan in job_channels:
print('{} (CSC{}) is referenced to {}'.
format(int_names[chan], chan, name_refs[int_names[chan]]))
used_refs.add(name_refs[int_names[chan]])
print('The {} channels in {} use the following {} references: {}'.
format(len(job_channels), fname_sort_neg,
len(used_refs), sorted(used_refs)))
if __name__ == "__main__":
fname_logfile = glob.glob("CheetahLogFile_*.csv")[0]
fname_channels = "do_sort_neg.txt"
fname_channel_names = "channel_names.csv"
print("Checking {} for double references.".format(fname_channels))
print("Using {} and {}".format(fname_logfile, fname_channel_names))
main(fname_channels, fname_logfile, fname_channel_names)
|
<commit_before><commit_msg>Add tool to detect multiple use of a negative unit
The script tools/detect_multiple_usage.py automatically parses
'do_sort_neg.txt' and checks whether the same reference channel
occurs more than once.<commit_after># JN 2016-04-08
"""
First read do_sort_neg.txt, CheetahLogFile_*.csv,
and channel_names.csv.
Then check if any channel in do_sort_neg.txt
is using the same reference.
Warning: This assumes that all channels have been renamed
to the CSCxy.ncs format
"""
from __future__ import print_function, division, absolute_import
import os
import glob
import csv
def main(fname_sort_neg, fname_logfile, fname_channels):
"""
parse the files and check for problems
"""
# read channel names
with open(fname_channels, 'r') as fid:
names = dict(csv.reader(fid, delimiter=';'))
# transform to integer based names
int_names = {int(fname[3:]): names[fname] for fname in names}
# read references
with open(fname_logfile, 'r') as fid:
refs = list((csv.reader(fid, delimiter=',')))
name_refs = {}
for item in refs:
if len(item) == 4:
name_refs[item[1]] = item[2]
# read proposed sorting
with open(fname_sort_neg, 'r') as fid:
job_channels = [int(os.path.basename(line.strip())[8:-3])
for line in fid.readlines()]
used_refs = set()
for chan in job_channels:
print('{} (CSC{}) is referenced to {}'.
format(int_names[chan], chan, name_refs[int_names[chan]]))
used_refs.add(name_refs[int_names[chan]])
print('The {} channels in {} use the following {} references: {}'.
format(len(job_channels), fname_sort_neg,
len(used_refs), sorted(used_refs)))
if __name__ == "__main__":
fname_logfile = glob.glob("CheetahLogFile_*.csv")[0]
fname_channels = "do_sort_neg.txt"
fname_channel_names = "channel_names.csv"
print("Checking {} for double references.".format(fname_channels))
print("Using {} and {}".format(fname_logfile, fname_channel_names))
main(fname_channels, fname_logfile, fname_channel_names)
|
|
0173086c95719e1ae12a431626f181e402f9e588
|
http_simulate_news_app.py
|
http_simulate_news_app.py
|
import random
import gevent
from locust import HttpLocust, TaskSet, task
HTTP_USER = "test"
HTTP_PASSWORD = "1"
class NewsAppApi(TaskSet):
def get_news(self):
self.client.get("/news", auth=(HTTP_USER, HTTP_PASSWORD) )
def get_single_news(self, news_id):
self.client.get("/news/{}".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}")
def _make_comment_request(self, method, news_id):
self.client.request(method, "/news/{}/comments".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}/comments")
def get_single_news_comments(self, news_id):
self._make_comment_request('GET', news_id)
def add_news_comment(self, news_id):
self._make_comment_request('POST', news_id)
def edit_news_comment(self, news_id):
self._make_comment_request('PUT', news_id)
def delete_news_comment(self, news_id):
self._make_comment_request('DELETE', news_id)
class NormalUserBehavior(NewsAppApi):
@task(10)
def read_news(self):
news_id = random.randint(1, 1000)
self.get_single_news(news_id)
gevent.sleep(3)
self.get_single_news_comments(news_id)
@task(3)
def do_comment_news(self):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
@task(1)
def do_edit_news_comments(self):
news_id = random.randint(1, 1000)
self.edit_news_comment(news_id)
@task(1)
def do_delete_news_comments(self):
news_id = random.randint(1, 1000)
self.delete_news_comment(news_id)
def on_start(self):
self.get_news()
class SpamUserBehavior(NewsAppApi):
@task(1)
def do_comment_news(self):
for i in range(1, 10):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
self.edit_news_comment(news_id)
def on_start(self):
self.get_news()
class NormalUserLocust(HttpLocust):
task_set = NormalUserBehavior
weight = 10
min_wait = 100
max_wait = 500
class SpamUserLocust(HttpLocust):
task_set = SpamUserBehavior
weight = 1
min_wait = 1
max_wait = 1
|
Add news app user behavior simulator
|
Add news app user behavior simulator
|
Python
|
apache-2.0
|
drednout/locust_on_meetup
|
Add news app user behavior simulator
|
import random
import gevent
from locust import HttpLocust, TaskSet, task
HTTP_USER = "test"
HTTP_PASSWORD = "1"
class NewsAppApi(TaskSet):
def get_news(self):
self.client.get("/news", auth=(HTTP_USER, HTTP_PASSWORD) )
def get_single_news(self, news_id):
self.client.get("/news/{}".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}")
def _make_comment_request(self, method, news_id):
self.client.request(method, "/news/{}/comments".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}/comments")
def get_single_news_comments(self, news_id):
self._make_comment_request('GET', news_id)
def add_news_comment(self, news_id):
self._make_comment_request('POST', news_id)
def edit_news_comment(self, news_id):
self._make_comment_request('PUT', news_id)
def delete_news_comment(self, news_id):
self._make_comment_request('DELETE', news_id)
class NormalUserBehavior(NewsAppApi):
@task(10)
def read_news(self):
news_id = random.randint(1, 1000)
self.get_single_news(news_id)
gevent.sleep(3)
self.get_single_news_comments(news_id)
@task(3)
def do_comment_news(self):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
@task(1)
def do_edit_news_comments(self):
news_id = random.randint(1, 1000)
self.edit_news_comment(news_id)
@task(1)
def do_delete_news_comments(self):
news_id = random.randint(1, 1000)
self.delete_news_comment(news_id)
def on_start(self):
self.get_news()
class SpamUserBehavior(NewsAppApi):
@task(1)
def do_comment_news(self):
for i in range(1, 10):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
self.edit_news_comment(news_id)
def on_start(self):
self.get_news()
class NormalUserLocust(HttpLocust):
task_set = NormalUserBehavior
weight = 10
min_wait = 100
max_wait = 500
class SpamUserLocust(HttpLocust):
task_set = SpamUserBehavior
weight = 1
min_wait = 1
max_wait = 1
|
<commit_before><commit_msg>Add news app user behavior simulator<commit_after>
|
import random
import gevent
from locust import HttpLocust, TaskSet, task
HTTP_USER = "test"
HTTP_PASSWORD = "1"
class NewsAppApi(TaskSet):
def get_news(self):
self.client.get("/news", auth=(HTTP_USER, HTTP_PASSWORD) )
def get_single_news(self, news_id):
self.client.get("/news/{}".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}")
def _make_comment_request(self, method, news_id):
self.client.request(method, "/news/{}/comments".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}/comments")
def get_single_news_comments(self, news_id):
self._make_comment_request('GET', news_id)
def add_news_comment(self, news_id):
self._make_comment_request('POST', news_id)
def edit_news_comment(self, news_id):
self._make_comment_request('PUT', news_id)
def delete_news_comment(self, news_id):
self._make_comment_request('DELETE', news_id)
class NormalUserBehavior(NewsAppApi):
@task(10)
def read_news(self):
news_id = random.randint(1, 1000)
self.get_single_news(news_id)
gevent.sleep(3)
self.get_single_news_comments(news_id)
@task(3)
def do_comment_news(self):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
@task(1)
def do_edit_news_comments(self):
news_id = random.randint(1, 1000)
self.edit_news_comment(news_id)
@task(1)
def do_delete_news_comments(self):
news_id = random.randint(1, 1000)
self.delete_news_comment(news_id)
def on_start(self):
self.get_news()
class SpamUserBehavior(NewsAppApi):
@task(1)
def do_comment_news(self):
for i in range(1, 10):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
self.edit_news_comment(news_id)
def on_start(self):
self.get_news()
class NormalUserLocust(HttpLocust):
task_set = NormalUserBehavior
weight = 10
min_wait = 100
max_wait = 500
class SpamUserLocust(HttpLocust):
task_set = SpamUserBehavior
weight = 1
min_wait = 1
max_wait = 1
|
Add news app user behavior simulatorimport random
import gevent
from locust import HttpLocust, TaskSet, task
HTTP_USER = "test"
HTTP_PASSWORD = "1"
class NewsAppApi(TaskSet):
def get_news(self):
self.client.get("/news", auth=(HTTP_USER, HTTP_PASSWORD) )
def get_single_news(self, news_id):
self.client.get("/news/{}".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}")
def _make_comment_request(self, method, news_id):
self.client.request(method, "/news/{}/comments".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}/comments")
def get_single_news_comments(self, news_id):
self._make_comment_request('GET', news_id)
def add_news_comment(self, news_id):
self._make_comment_request('POST', news_id)
def edit_news_comment(self, news_id):
self._make_comment_request('PUT', news_id)
def delete_news_comment(self, news_id):
self._make_comment_request('DELETE', news_id)
class NormalUserBehavior(NewsAppApi):
@task(10)
def read_news(self):
news_id = random.randint(1, 1000)
self.get_single_news(news_id)
gevent.sleep(3)
self.get_single_news_comments(news_id)
@task(3)
def do_comment_news(self):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
@task(1)
def do_edit_news_comments(self):
news_id = random.randint(1, 1000)
self.edit_news_comment(news_id)
@task(1)
def do_delete_news_comments(self):
news_id = random.randint(1, 1000)
self.delete_news_comment(news_id)
def on_start(self):
self.get_news()
class SpamUserBehavior(NewsAppApi):
@task(1)
def do_comment_news(self):
for i in range(1, 10):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
self.edit_news_comment(news_id)
def on_start(self):
self.get_news()
class NormalUserLocust(HttpLocust):
task_set = NormalUserBehavior
weight = 10
min_wait = 100
max_wait = 500
class SpamUserLocust(HttpLocust):
task_set = SpamUserBehavior
weight = 1
min_wait = 1
max_wait = 1
|
<commit_before><commit_msg>Add news app user behavior simulator<commit_after>import random
import gevent
from locust import HttpLocust, TaskSet, task
HTTP_USER = "test"
HTTP_PASSWORD = "1"
class NewsAppApi(TaskSet):
def get_news(self):
self.client.get("/news", auth=(HTTP_USER, HTTP_PASSWORD) )
def get_single_news(self, news_id):
self.client.get("/news/{}".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}")
def _make_comment_request(self, method, news_id):
self.client.request(method, "/news/{}/comments".format(news_id), auth=(HTTP_USER, HTTP_PASSWORD),
name="/news/{id}/comments")
def get_single_news_comments(self, news_id):
self._make_comment_request('GET', news_id)
def add_news_comment(self, news_id):
self._make_comment_request('POST', news_id)
def edit_news_comment(self, news_id):
self._make_comment_request('PUT', news_id)
def delete_news_comment(self, news_id):
self._make_comment_request('DELETE', news_id)
class NormalUserBehavior(NewsAppApi):
@task(10)
def read_news(self):
news_id = random.randint(1, 1000)
self.get_single_news(news_id)
gevent.sleep(3)
self.get_single_news_comments(news_id)
@task(3)
def do_comment_news(self):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
@task(1)
def do_edit_news_comments(self):
news_id = random.randint(1, 1000)
self.edit_news_comment(news_id)
@task(1)
def do_delete_news_comments(self):
news_id = random.randint(1, 1000)
self.delete_news_comment(news_id)
def on_start(self):
self.get_news()
class SpamUserBehavior(NewsAppApi):
@task(1)
def do_comment_news(self):
for i in range(1, 10):
news_id = random.randint(1, 1000)
self.add_news_comment(news_id)
self.edit_news_comment(news_id)
def on_start(self):
self.get_news()
class NormalUserLocust(HttpLocust):
task_set = NormalUserBehavior
weight = 10
min_wait = 100
max_wait = 500
class SpamUserLocust(HttpLocust):
task_set = SpamUserBehavior
weight = 1
min_wait = 1
max_wait = 1
|
|
5c2da7acfa53a179328d1aa2106c298533047af0
|
lintcode/Medium/187_Gas_Station.py
|
lintcode/Medium/187_Gas_Station.py
|
class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
# write your code here
# Solution 1
total_left_gas = 0
sum = 0
res = 0
for i in range(len(gas)):
sum = sum + gas[i] - cost[i]
if (sum < 0):
sum = 0
res = i + 1
total_left_gas += gas[i] - cost[i]
return -1 if total_left_gas < 0 or res >= len(gas) else res
|
Add solution to lintcode question 187
|
Add solution to lintcode question 187
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 187
|
class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
# write your code here
# Solution 1
total_left_gas = 0
sum = 0
res = 0
for i in range(len(gas)):
sum = sum + gas[i] - cost[i]
if (sum < 0):
sum = 0
res = i + 1
total_left_gas += gas[i] - cost[i]
return -1 if total_left_gas < 0 or res >= len(gas) else res
|
<commit_before><commit_msg>Add solution to lintcode question 187<commit_after>
|
class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
# write your code here
# Solution 1
total_left_gas = 0
sum = 0
res = 0
for i in range(len(gas)):
sum = sum + gas[i] - cost[i]
if (sum < 0):
sum = 0
res = i + 1
total_left_gas += gas[i] - cost[i]
return -1 if total_left_gas < 0 or res >= len(gas) else res
|
Add solution to lintcode question 187class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
# write your code here
# Solution 1
total_left_gas = 0
sum = 0
res = 0
for i in range(len(gas)):
sum = sum + gas[i] - cost[i]
if (sum < 0):
sum = 0
res = i + 1
total_left_gas += gas[i] - cost[i]
return -1 if total_left_gas < 0 or res >= len(gas) else res
|
<commit_before><commit_msg>Add solution to lintcode question 187<commit_after>class Solution:
# @param gas, a list of integers
# @param cost, a list of integers
# @return an integer
def canCompleteCircuit(self, gas, cost):
# write your code here
# Solution 1
total_left_gas = 0
sum = 0
res = 0
for i in range(len(gas)):
sum = sum + gas[i] - cost[i]
if (sum < 0):
sum = 0
res = i + 1
total_left_gas += gas[i] - cost[i]
return -1 if total_left_gas < 0 or res >= len(gas) else res
|
|
1abe318ffc8ece592d07ca9657146a42517a16f2
|
tests/test52.py
|
tests/test52.py
|
import judicious
judicious.seed("cc722bf6-e319-cf63-a671-cbae64bfdb0f")
# 1 (complete): 3799aa89-ccae-c268-d0e8-cc4e9ddddee4
# 2 (timeout) : 4d30601d-dfe3-ee53-8594-7fc0aa8e68ec
# 3 (complete): fe07a885-53c3-9a22-c93e-91436e5d8f0c
# 1 (complete): 4f4d13ed-7d1c-cbee-638d-6aee5188c929
# 2 (timeout) : 720ebe41-5987-b9f0-b571-fd7fb50f2b05
# 3 (timeout) : 358e7d25-af92-8a18-23ec-49025aecc87b
# 4 (complete) : cab5c911-741c-8721-d851-483669940626
def experiment():
with judicious.Person(lifetime=60) as person:
j1 = person.joke()
j2 = person.joke()
j3 = person.joke()
j4 = person.joke()
person.complete()
return [j1, j2, j3, j4]
results = judicious.map3(experiment, [None for _ in range(2)])
print(results)
|
Add demo w/ context manager and lifetime
|
Add demo w/ context manager and lifetime
|
Python
|
mit
|
suchow/judicious,suchow/judicious,suchow/judicious
|
Add demo w/ context manager and lifetime
|
import judicious
judicious.seed("cc722bf6-e319-cf63-a671-cbae64bfdb0f")
# 1 (complete): 3799aa89-ccae-c268-d0e8-cc4e9ddddee4
# 2 (timeout) : 4d30601d-dfe3-ee53-8594-7fc0aa8e68ec
# 3 (complete): fe07a885-53c3-9a22-c93e-91436e5d8f0c
# 1 (complete): 4f4d13ed-7d1c-cbee-638d-6aee5188c929
# 2 (timeout) : 720ebe41-5987-b9f0-b571-fd7fb50f2b05
# 3 (timeout) : 358e7d25-af92-8a18-23ec-49025aecc87b
# 4 (complete) : cab5c911-741c-8721-d851-483669940626
def experiment():
with judicious.Person(lifetime=60) as person:
j1 = person.joke()
j2 = person.joke()
j3 = person.joke()
j4 = person.joke()
person.complete()
return [j1, j2, j3, j4]
results = judicious.map3(experiment, [None for _ in range(2)])
print(results)
|
<commit_before><commit_msg>Add demo w/ context manager and lifetime<commit_after>
|
import judicious
judicious.seed("cc722bf6-e319-cf63-a671-cbae64bfdb0f")
# 1 (complete): 3799aa89-ccae-c268-d0e8-cc4e9ddddee4
# 2 (timeout) : 4d30601d-dfe3-ee53-8594-7fc0aa8e68ec
# 3 (complete): fe07a885-53c3-9a22-c93e-91436e5d8f0c
# 1 (complete): 4f4d13ed-7d1c-cbee-638d-6aee5188c929
# 2 (timeout) : 720ebe41-5987-b9f0-b571-fd7fb50f2b05
# 3 (timeout) : 358e7d25-af92-8a18-23ec-49025aecc87b
# 4 (complete) : cab5c911-741c-8721-d851-483669940626
def experiment():
with judicious.Person(lifetime=60) as person:
j1 = person.joke()
j2 = person.joke()
j3 = person.joke()
j4 = person.joke()
person.complete()
return [j1, j2, j3, j4]
results = judicious.map3(experiment, [None for _ in range(2)])
print(results)
|
Add demo w/ context manager and lifetimeimport judicious
judicious.seed("cc722bf6-e319-cf63-a671-cbae64bfdb0f")
# 1 (complete): 3799aa89-ccae-c268-d0e8-cc4e9ddddee4
# 2 (timeout) : 4d30601d-dfe3-ee53-8594-7fc0aa8e68ec
# 3 (complete): fe07a885-53c3-9a22-c93e-91436e5d8f0c
# 1 (complete): 4f4d13ed-7d1c-cbee-638d-6aee5188c929
# 2 (timeout) : 720ebe41-5987-b9f0-b571-fd7fb50f2b05
# 3 (timeout) : 358e7d25-af92-8a18-23ec-49025aecc87b
# 4 (complete) : cab5c911-741c-8721-d851-483669940626
def experiment():
with judicious.Person(lifetime=60) as person:
j1 = person.joke()
j2 = person.joke()
j3 = person.joke()
j4 = person.joke()
person.complete()
return [j1, j2, j3, j4]
results = judicious.map3(experiment, [None for _ in range(2)])
print(results)
|
<commit_before><commit_msg>Add demo w/ context manager and lifetime<commit_after>import judicious
judicious.seed("cc722bf6-e319-cf63-a671-cbae64bfdb0f")
# 1 (complete): 3799aa89-ccae-c268-d0e8-cc4e9ddddee4
# 2 (timeout) : 4d30601d-dfe3-ee53-8594-7fc0aa8e68ec
# 3 (complete): fe07a885-53c3-9a22-c93e-91436e5d8f0c
# 1 (complete): 4f4d13ed-7d1c-cbee-638d-6aee5188c929
# 2 (timeout) : 720ebe41-5987-b9f0-b571-fd7fb50f2b05
# 3 (timeout) : 358e7d25-af92-8a18-23ec-49025aecc87b
# 4 (complete) : cab5c911-741c-8721-d851-483669940626
def experiment():
with judicious.Person(lifetime=60) as person:
j1 = person.joke()
j2 = person.joke()
j3 = person.joke()
j4 = person.joke()
person.complete()
return [j1, j2, j3, j4]
results = judicious.map3(experiment, [None for _ in range(2)])
print(results)
|
|
4dea108334acb98cc11a45cfa362f372bb052c79
|
galleries/migrations/0008_auto_20170503_1704.py
|
galleries/migrations/0008_auto_20170503_1704.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 00:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galleries', '0007_refresh_all_media_records'),
]
operations = [
migrations.AlterField(
model_name='media',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
Add a migration for the previous commit
|
Add a migration for the previous commit
|
Python
|
mit
|
DylanMcCall/stuartmccall.ca,DylanMcCall/stuartmccall.ca,DylanMcCall/stuartmccall.ca
|
Add a migration for the previous commit
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 00:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galleries', '0007_refresh_all_media_records'),
]
operations = [
migrations.AlterField(
model_name='media',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
<commit_before><commit_msg>Add a migration for the previous commit<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 00:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galleries', '0007_refresh_all_media_records'),
]
operations = [
migrations.AlterField(
model_name='media',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
Add a migration for the previous commit# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 00:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galleries', '0007_refresh_all_media_records'),
]
operations = [
migrations.AlterField(
model_name='media',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
<commit_before><commit_msg>Add a migration for the previous commit<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 00:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('galleries', '0007_refresh_all_media_records'),
]
operations = [
migrations.AlterField(
model_name='media',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
|
10f14597a84a25565f5485ae452ae95c361a2b1d
|
integrations/node_js/my_first_test.py
|
integrations/node_js/my_first_test.py
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("http://xkcd.com/353/")
self.wait_for_element("div#comic")
self.click('a[rel="license"]')
text = self.wait_for_element('center').text
self.assertTrue("reuse any of my drawings" in text)
self.open("http://xkcd.com/1481/")
self.click_link_text('Blag')
self.wait_for_text("The blag", "header h2")
self.update_text("input#s", "Robots!\n")
self.wait_for_text("Hooray robots!", "#content")
self.open("http://xkcd.com/1319/")
self.wait_for_text("Automation", "div#ctitle")
|
Add an example test to run
|
Add an example test to run
|
Python
|
mit
|
possoumous/Watchers,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,possoumous/Watchers,possoumous/Watchers,ktp420/SeleniumBase,mdmintz/seleniumspot,ktp420/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,possoumous/Watchers,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,ktp420/SeleniumBase,mdmintz/seleniumspot,seleniumbase/SeleniumBase,ktp420/SeleniumBase
|
Add an example test to run
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("http://xkcd.com/353/")
self.wait_for_element("div#comic")
self.click('a[rel="license"]')
text = self.wait_for_element('center').text
self.assertTrue("reuse any of my drawings" in text)
self.open("http://xkcd.com/1481/")
self.click_link_text('Blag')
self.wait_for_text("The blag", "header h2")
self.update_text("input#s", "Robots!\n")
self.wait_for_text("Hooray robots!", "#content")
self.open("http://xkcd.com/1319/")
self.wait_for_text("Automation", "div#ctitle")
|
<commit_before><commit_msg>Add an example test to run<commit_after>
|
from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("http://xkcd.com/353/")
self.wait_for_element("div#comic")
self.click('a[rel="license"]')
text = self.wait_for_element('center').text
self.assertTrue("reuse any of my drawings" in text)
self.open("http://xkcd.com/1481/")
self.click_link_text('Blag')
self.wait_for_text("The blag", "header h2")
self.update_text("input#s", "Robots!\n")
self.wait_for_text("Hooray robots!", "#content")
self.open("http://xkcd.com/1319/")
self.wait_for_text("Automation", "div#ctitle")
|
Add an example test to runfrom seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("http://xkcd.com/353/")
self.wait_for_element("div#comic")
self.click('a[rel="license"]')
text = self.wait_for_element('center').text
self.assertTrue("reuse any of my drawings" in text)
self.open("http://xkcd.com/1481/")
self.click_link_text('Blag')
self.wait_for_text("The blag", "header h2")
self.update_text("input#s", "Robots!\n")
self.wait_for_text("Hooray robots!", "#content")
self.open("http://xkcd.com/1319/")
self.wait_for_text("Automation", "div#ctitle")
|
<commit_before><commit_msg>Add an example test to run<commit_after>from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_basic(self):
self.open("http://xkcd.com/353/")
self.wait_for_element("div#comic")
self.click('a[rel="license"]')
text = self.wait_for_element('center').text
self.assertTrue("reuse any of my drawings" in text)
self.open("http://xkcd.com/1481/")
self.click_link_text('Blag')
self.wait_for_text("The blag", "header h2")
self.update_text("input#s", "Robots!\n")
self.wait_for_text("Hooray robots!", "#content")
self.open("http://xkcd.com/1319/")
self.wait_for_text("Automation", "div#ctitle")
|
|
9b0c0e0ceedde081846a2360c45eb6f14b9cd9f5
|
elasticsearch_django/migrations/0003_auto_20160926_2021.py
|
elasticsearch_django/migrations/0003_auto_20160926_2021.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('elasticsearch_django', '0002_searchquery_duration'),
]
operations = [
migrations.AlterModelOptions(
name='searchquery',
options={'verbose_name': 'Search query', 'verbose_name_plural': 'Search queries'},
),
]
|
Add migration for SearchQuery meta options
|
Add migration for SearchQuery meta options
|
Python
|
mit
|
yunojuno/elasticsearch-django
|
Add migration for SearchQuery meta options
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('elasticsearch_django', '0002_searchquery_duration'),
]
operations = [
migrations.AlterModelOptions(
name='searchquery',
options={'verbose_name': 'Search query', 'verbose_name_plural': 'Search queries'},
),
]
|
<commit_before><commit_msg>Add migration for SearchQuery meta options<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('elasticsearch_django', '0002_searchquery_duration'),
]
operations = [
migrations.AlterModelOptions(
name='searchquery',
options={'verbose_name': 'Search query', 'verbose_name_plural': 'Search queries'},
),
]
|
Add migration for SearchQuery meta options# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('elasticsearch_django', '0002_searchquery_duration'),
]
operations = [
migrations.AlterModelOptions(
name='searchquery',
options={'verbose_name': 'Search query', 'verbose_name_plural': 'Search queries'},
),
]
|
<commit_before><commit_msg>Add migration for SearchQuery meta options<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('elasticsearch_django', '0002_searchquery_duration'),
]
operations = [
migrations.AlterModelOptions(
name='searchquery',
options={'verbose_name': 'Search query', 'verbose_name_plural': 'Search queries'},
),
]
|
|
fcdb3bbc64f7ecb2761ccc00bff5ed8472e4d65d
|
faadata/airports/management/commands/ourairports_import.py
|
faadata/airports/management/commands/ourairports_import.py
|
import csv
import os
from decimal import Decimal
from django.contrib.gis.geos import Point
from django.core.cache import cache
from django.core.management.base import BaseCommand
from optparse import make_option
from faadata.airports.models import Airport
class Command(BaseCommand): # pragma: no cover
option_list = BaseCommand.option_list + (
make_option('--path', default='', dest='path',
help='The directory where the OurAirports data is stored.'),
)
help = ("Imports data from the OurAirports data download.")
def handle(self, *args, **options):
ourairports_file = open(os.path.join(options['path'], 'airports.csv'), 'rU')
ourairports_csv = csv.reader(ourairports_file, quoting=csv.QUOTE_MINIMAL, delimiter=',')
for airport in ourairports_csv:
print airport
if airport[0] != 'id':
airport_id = "oura-%s" % airport[0]
try:
this_airport = Airport.objects.get(location_identifier=airport[1])
except Airport.DoesNotExist:
this_airport = Airport(location_identifier=airport[1])
this_airport.facility_site_number = airport_id
this_airport.location_identifier = airport[1]
this_airport.facility_type = airport[2]
this_airport.facility_name = airport[3][:50]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.point = Point((Decimal(airport[5]), Decimal(airport[4])),)
if len(airport[6]) > 0:
this_airport.elevation_msl = airport[6]
else:
this_airport.elevation_msl = -1111
this_airport.icao_identifier = airport[13]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.save()
else:
print("No point was available for %s so it was skipped." % airport[1])
# [00] "id",
# [01] "ident",
# [02] "type",
# [03] "name",
# [04] "latitude_deg",
# [05] "longitude_deg",
# [06] "elevation_ft",
# [07] "continent",
# [08] "iso_country",
# [09] "iso_region",
# [10] "municipality",
# [11] "scheduled_service",
# [12] "gps_code",
# [13] "iata_code",
# [14] "local_code",
# [15] "home_link",
# [16] "wikipedia_link",
# [17] "keywords"
|
Add an importer for the dataset from OurAirports, usable instead of the FAA’s data.
|
Add an importer for the dataset from OurAirports, usable instead of the FAA’s data.
|
Python
|
bsd-3-clause
|
adamfast/faadata,adamfast/faadata,adamfast/faadata
|
Add an importer for the dataset from OurAirports, usable instead of the FAA’s data.
|
import csv
import os
from decimal import Decimal
from django.contrib.gis.geos import Point
from django.core.cache import cache
from django.core.management.base import BaseCommand
from optparse import make_option
from faadata.airports.models import Airport
class Command(BaseCommand): # pragma: no cover
option_list = BaseCommand.option_list + (
make_option('--path', default='', dest='path',
help='The directory where the OurAirports data is stored.'),
)
help = ("Imports data from the OurAirports data download.")
def handle(self, *args, **options):
ourairports_file = open(os.path.join(options['path'], 'airports.csv'), 'rU')
ourairports_csv = csv.reader(ourairports_file, quoting=csv.QUOTE_MINIMAL, delimiter=',')
for airport in ourairports_csv:
print airport
if airport[0] != 'id':
airport_id = "oura-%s" % airport[0]
try:
this_airport = Airport.objects.get(location_identifier=airport[1])
except Airport.DoesNotExist:
this_airport = Airport(location_identifier=airport[1])
this_airport.facility_site_number = airport_id
this_airport.location_identifier = airport[1]
this_airport.facility_type = airport[2]
this_airport.facility_name = airport[3][:50]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.point = Point((Decimal(airport[5]), Decimal(airport[4])),)
if len(airport[6]) > 0:
this_airport.elevation_msl = airport[6]
else:
this_airport.elevation_msl = -1111
this_airport.icao_identifier = airport[13]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.save()
else:
print("No point was available for %s so it was skipped." % airport[1])
# [00] "id",
# [01] "ident",
# [02] "type",
# [03] "name",
# [04] "latitude_deg",
# [05] "longitude_deg",
# [06] "elevation_ft",
# [07] "continent",
# [08] "iso_country",
# [09] "iso_region",
# [10] "municipality",
# [11] "scheduled_service",
# [12] "gps_code",
# [13] "iata_code",
# [14] "local_code",
# [15] "home_link",
# [16] "wikipedia_link",
# [17] "keywords"
|
<commit_before><commit_msg>Add an importer for the dataset from OurAirports, usable instead of the FAA’s data.<commit_after>
|
import csv
import os
from decimal import Decimal
from django.contrib.gis.geos import Point
from django.core.cache import cache
from django.core.management.base import BaseCommand
from optparse import make_option
from faadata.airports.models import Airport
class Command(BaseCommand): # pragma: no cover
option_list = BaseCommand.option_list + (
make_option('--path', default='', dest='path',
help='The directory where the OurAirports data is stored.'),
)
help = ("Imports data from the OurAirports data download.")
def handle(self, *args, **options):
ourairports_file = open(os.path.join(options['path'], 'airports.csv'), 'rU')
ourairports_csv = csv.reader(ourairports_file, quoting=csv.QUOTE_MINIMAL, delimiter=',')
for airport in ourairports_csv:
print airport
if airport[0] != 'id':
airport_id = "oura-%s" % airport[0]
try:
this_airport = Airport.objects.get(location_identifier=airport[1])
except Airport.DoesNotExist:
this_airport = Airport(location_identifier=airport[1])
this_airport.facility_site_number = airport_id
this_airport.location_identifier = airport[1]
this_airport.facility_type = airport[2]
this_airport.facility_name = airport[3][:50]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.point = Point((Decimal(airport[5]), Decimal(airport[4])),)
if len(airport[6]) > 0:
this_airport.elevation_msl = airport[6]
else:
this_airport.elevation_msl = -1111
this_airport.icao_identifier = airport[13]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.save()
else:
print("No point was available for %s so it was skipped." % airport[1])
# [00] "id",
# [01] "ident",
# [02] "type",
# [03] "name",
# [04] "latitude_deg",
# [05] "longitude_deg",
# [06] "elevation_ft",
# [07] "continent",
# [08] "iso_country",
# [09] "iso_region",
# [10] "municipality",
# [11] "scheduled_service",
# [12] "gps_code",
# [13] "iata_code",
# [14] "local_code",
# [15] "home_link",
# [16] "wikipedia_link",
# [17] "keywords"
|
Add an importer for the dataset from OurAirports, usable instead of the FAA’s data.import csv
import os
from decimal import Decimal
from django.contrib.gis.geos import Point
from django.core.cache import cache
from django.core.management.base import BaseCommand
from optparse import make_option
from faadata.airports.models import Airport
class Command(BaseCommand): # pragma: no cover
option_list = BaseCommand.option_list + (
make_option('--path', default='', dest='path',
help='The directory where the OurAirports data is stored.'),
)
help = ("Imports data from the OurAirports data download.")
def handle(self, *args, **options):
ourairports_file = open(os.path.join(options['path'], 'airports.csv'), 'rU')
ourairports_csv = csv.reader(ourairports_file, quoting=csv.QUOTE_MINIMAL, delimiter=',')
for airport in ourairports_csv:
print airport
if airport[0] != 'id':
airport_id = "oura-%s" % airport[0]
try:
this_airport = Airport.objects.get(location_identifier=airport[1])
except Airport.DoesNotExist:
this_airport = Airport(location_identifier=airport[1])
this_airport.facility_site_number = airport_id
this_airport.location_identifier = airport[1]
this_airport.facility_type = airport[2]
this_airport.facility_name = airport[3][:50]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.point = Point((Decimal(airport[5]), Decimal(airport[4])),)
if len(airport[6]) > 0:
this_airport.elevation_msl = airport[6]
else:
this_airport.elevation_msl = -1111
this_airport.icao_identifier = airport[13]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.save()
else:
print("No point was available for %s so it was skipped." % airport[1])
# [00] "id",
# [01] "ident",
# [02] "type",
# [03] "name",
# [04] "latitude_deg",
# [05] "longitude_deg",
# [06] "elevation_ft",
# [07] "continent",
# [08] "iso_country",
# [09] "iso_region",
# [10] "municipality",
# [11] "scheduled_service",
# [12] "gps_code",
# [13] "iata_code",
# [14] "local_code",
# [15] "home_link",
# [16] "wikipedia_link",
# [17] "keywords"
|
<commit_before><commit_msg>Add an importer for the dataset from OurAirports, usable instead of the FAA’s data.<commit_after>import csv
import os
from decimal import Decimal
from django.contrib.gis.geos import Point
from django.core.cache import cache
from django.core.management.base import BaseCommand
from optparse import make_option
from faadata.airports.models import Airport
class Command(BaseCommand): # pragma: no cover
option_list = BaseCommand.option_list + (
make_option('--path', default='', dest='path',
help='The directory where the OurAirports data is stored.'),
)
help = ("Imports data from the OurAirports data download.")
def handle(self, *args, **options):
ourairports_file = open(os.path.join(options['path'], 'airports.csv'), 'rU')
ourairports_csv = csv.reader(ourairports_file, quoting=csv.QUOTE_MINIMAL, delimiter=',')
for airport in ourairports_csv:
print airport
if airport[0] != 'id':
airport_id = "oura-%s" % airport[0]
try:
this_airport = Airport.objects.get(location_identifier=airport[1])
except Airport.DoesNotExist:
this_airport = Airport(location_identifier=airport[1])
this_airport.facility_site_number = airport_id
this_airport.location_identifier = airport[1]
this_airport.facility_type = airport[2]
this_airport.facility_name = airport[3][:50]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.point = Point((Decimal(airport[5]), Decimal(airport[4])),)
if len(airport[6]) > 0:
this_airport.elevation_msl = airport[6]
else:
this_airport.elevation_msl = -1111
this_airport.icao_identifier = airport[13]
if len(airport[4]) > 0 and len(airport[5]) > 0:
this_airport.save()
else:
print("No point was available for %s so it was skipped." % airport[1])
# [00] "id",
# [01] "ident",
# [02] "type",
# [03] "name",
# [04] "latitude_deg",
# [05] "longitude_deg",
# [06] "elevation_ft",
# [07] "continent",
# [08] "iso_country",
# [09] "iso_region",
# [10] "municipality",
# [11] "scheduled_service",
# [12] "gps_code",
# [13] "iata_code",
# [14] "local_code",
# [15] "home_link",
# [16] "wikipedia_link",
# [17] "keywords"
|
|
bd3506c158c9aa03fabcd15d165bd1bc2b0845da
|
src/app_engine/analytics_enums_test.py
|
src/app_engine/analytics_enums_test.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
import unittest
from analytics_enums import BigquerySchemaClass
from analytics_enums import EnumClass
class AnalyticsEnumsTest(unittest.TestCase):
"""Test the EnumClass behaves as expected."""
def testEnumClass(self):
value_dict = {
'FOO': 10,
'BAR': 42,
'BAZ': 'test',
'SubEnum': {
'BIM': 'bang',
'BEN': 'boom',
}}
my_enum = EnumClass(value_dict)
self.assertEqual(value_dict['FOO'], my_enum.FOO)
self.assertEqual(value_dict['BAR'], my_enum.BAR)
self.assertEqual(value_dict['BAZ'], my_enum.BAZ)
self.assertEqual(value_dict['SubEnum']['BIM'], my_enum.SubEnum.BIM)
self.assertEqual(value_dict['SubEnum']['BEN'], my_enum.SubEnum.BEN)
self.assertTrue(isinstance(my_enum.SubEnum, EnumClass))
def testBigquerySchemaClass(self):
field1 = 'field1'
field2 = 'field2'
schema_dict = [
{
'name': 'field1',
'type': 'string',
'mode': 'nullable'
},
{
'name': 'field2',
'type': 'timestamp',
'mode': 'nullable'
},
]
my_enum = BigquerySchemaClass(schema_dict)
self.assertEqual(field1, my_enum.FIELD1)
self.assertEqual(field2, my_enum.FIELD2)
|
Add test for classes in analytics_enums module.
|
Add test for classes in analytics_enums module.
Includes tests for classes that convert dictionaries into attributed classes.
|
Python
|
bsd-3-clause
|
jiayliu/apprtc,webrtc/apprtc,82488059/apprtc,shelsonjava/apprtc,virajs/apprtc,arnauorriols/apprtc,aadebuger/docker-apprtc,webrtc/apprtc,todotobe1/apprtc,aadebuger/docker-apprtc,YouthAndra/apprtc,TheKnarf/apprtc,bpyoung92/apprtc,JiYou/apprtc,mvenkatesh431/apprtc,arnauorriols/apprtc,procandi/apprtc,mvenkatesh431/apprtc,virajs/apprtc,shelsonjava/apprtc,shelsonjava/apprtc,jarl-alejandro/apprtc,todotobe1/apprtc,webrtc/apprtc,82488059/apprtc,YouthAndra/apprtc,82488059/apprtc,dengshaodong/docker-apprtc,jiayliu/apprtc,bpyoung92/apprtc,mvenkatesh431/apprtc,smadhusu/AppRTC,arnauorriols/apprtc,procandi/apprtc,82488059/apprtc,JiYou/apprtc,aadebuger/docker-apprtc,dengshaodong/docker-apprtc,mvenkatesh431/apprtc,YouthAndra/apprtc,webrtc/apprtc,virajs/apprtc,mvenkatesh431/apprtc,dengshaodong/docker-apprtc,jarl-alejandro/apprtc,shelsonjava/apprtc,YouthAndra/apprtc,bpyoung92/apprtc,jiayliu/apprtc,arnauorriols/apprtc,TribeMedia/apprtc,webrtc/apprtc,TribeMedia/apprtc,aadebuger/docker-apprtc,jarl-alejandro/apprtc,samdutton/apprtc,jiayliu/apprtc,bpyoung92/apprtc,todotobe1/apprtc,samdutton/apprtc,jiayliu/apprtc,JiYou/apprtc,samdutton/apprtc,todotobe1/apprtc,samdutton/apprtc,procandi/apprtc,TribeMedia/apprtc,arnauorriols/apprtc,JiYou/apprtc,procandi/apprtc,smadhusu/AppRTC,TribeMedia/apprtc,TheKnarf/apprtc,procandi/apprtc,TribeMedia/apprtc,82488059/apprtc,aadebuger/docker-apprtc,dengshaodong/docker-apprtc,virajs/apprtc,TheKnarf/apprtc,samdutton/apprtc,jarl-alejandro/apprtc,virajs/apprtc,smadhusu/AppRTC,smadhusu/AppRTC,jarl-alejandro/apprtc,TheKnarf/apprtc,bpyoung92/apprtc,todotobe1/apprtc,dengshaodong/docker-apprtc,YouthAndra/apprtc,smadhusu/AppRTC,shelsonjava/apprtc,TheKnarf/apprtc,JiYou/apprtc
|
Add test for classes in analytics_enums module.
Includes tests for classes that convert dictionaries into attributed classes.
|
# Copyright 2014 Google Inc. All Rights Reserved.
import unittest
from analytics_enums import BigquerySchemaClass
from analytics_enums import EnumClass
class AnalyticsEnumsTest(unittest.TestCase):
"""Test the EnumClass behaves as expected."""
def testEnumClass(self):
value_dict = {
'FOO': 10,
'BAR': 42,
'BAZ': 'test',
'SubEnum': {
'BIM': 'bang',
'BEN': 'boom',
}}
my_enum = EnumClass(value_dict)
self.assertEqual(value_dict['FOO'], my_enum.FOO)
self.assertEqual(value_dict['BAR'], my_enum.BAR)
self.assertEqual(value_dict['BAZ'], my_enum.BAZ)
self.assertEqual(value_dict['SubEnum']['BIM'], my_enum.SubEnum.BIM)
self.assertEqual(value_dict['SubEnum']['BEN'], my_enum.SubEnum.BEN)
self.assertTrue(isinstance(my_enum.SubEnum, EnumClass))
def testBigquerySchemaClass(self):
field1 = 'field1'
field2 = 'field2'
schema_dict = [
{
'name': 'field1',
'type': 'string',
'mode': 'nullable'
},
{
'name': 'field2',
'type': 'timestamp',
'mode': 'nullable'
},
]
my_enum = BigquerySchemaClass(schema_dict)
self.assertEqual(field1, my_enum.FIELD1)
self.assertEqual(field2, my_enum.FIELD2)
|
<commit_before><commit_msg>Add test for classes in analytics_enums module.
Includes tests for classes that convert dictionaries into attributed classes.<commit_after>
|
# Copyright 2014 Google Inc. All Rights Reserved.
import unittest
from analytics_enums import BigquerySchemaClass
from analytics_enums import EnumClass
class AnalyticsEnumsTest(unittest.TestCase):
"""Test the EnumClass behaves as expected."""
def testEnumClass(self):
value_dict = {
'FOO': 10,
'BAR': 42,
'BAZ': 'test',
'SubEnum': {
'BIM': 'bang',
'BEN': 'boom',
}}
my_enum = EnumClass(value_dict)
self.assertEqual(value_dict['FOO'], my_enum.FOO)
self.assertEqual(value_dict['BAR'], my_enum.BAR)
self.assertEqual(value_dict['BAZ'], my_enum.BAZ)
self.assertEqual(value_dict['SubEnum']['BIM'], my_enum.SubEnum.BIM)
self.assertEqual(value_dict['SubEnum']['BEN'], my_enum.SubEnum.BEN)
self.assertTrue(isinstance(my_enum.SubEnum, EnumClass))
def testBigquerySchemaClass(self):
field1 = 'field1'
field2 = 'field2'
schema_dict = [
{
'name': 'field1',
'type': 'string',
'mode': 'nullable'
},
{
'name': 'field2',
'type': 'timestamp',
'mode': 'nullable'
},
]
my_enum = BigquerySchemaClass(schema_dict)
self.assertEqual(field1, my_enum.FIELD1)
self.assertEqual(field2, my_enum.FIELD2)
|
Add test for classes in analytics_enums module.
Includes tests for classes that convert dictionaries into attributed classes.# Copyright 2014 Google Inc. All Rights Reserved.
import unittest
from analytics_enums import BigquerySchemaClass
from analytics_enums import EnumClass
class AnalyticsEnumsTest(unittest.TestCase):
"""Test the EnumClass behaves as expected."""
def testEnumClass(self):
value_dict = {
'FOO': 10,
'BAR': 42,
'BAZ': 'test',
'SubEnum': {
'BIM': 'bang',
'BEN': 'boom',
}}
my_enum = EnumClass(value_dict)
self.assertEqual(value_dict['FOO'], my_enum.FOO)
self.assertEqual(value_dict['BAR'], my_enum.BAR)
self.assertEqual(value_dict['BAZ'], my_enum.BAZ)
self.assertEqual(value_dict['SubEnum']['BIM'], my_enum.SubEnum.BIM)
self.assertEqual(value_dict['SubEnum']['BEN'], my_enum.SubEnum.BEN)
self.assertTrue(isinstance(my_enum.SubEnum, EnumClass))
def testBigquerySchemaClass(self):
field1 = 'field1'
field2 = 'field2'
schema_dict = [
{
'name': 'field1',
'type': 'string',
'mode': 'nullable'
},
{
'name': 'field2',
'type': 'timestamp',
'mode': 'nullable'
},
]
my_enum = BigquerySchemaClass(schema_dict)
self.assertEqual(field1, my_enum.FIELD1)
self.assertEqual(field2, my_enum.FIELD2)
|
<commit_before><commit_msg>Add test for classes in analytics_enums module.
Includes tests for classes that convert dictionaries into attributed classes.<commit_after># Copyright 2014 Google Inc. All Rights Reserved.
import unittest
from analytics_enums import BigquerySchemaClass
from analytics_enums import EnumClass
class AnalyticsEnumsTest(unittest.TestCase):
"""Test the EnumClass behaves as expected."""
def testEnumClass(self):
value_dict = {
'FOO': 10,
'BAR': 42,
'BAZ': 'test',
'SubEnum': {
'BIM': 'bang',
'BEN': 'boom',
}}
my_enum = EnumClass(value_dict)
self.assertEqual(value_dict['FOO'], my_enum.FOO)
self.assertEqual(value_dict['BAR'], my_enum.BAR)
self.assertEqual(value_dict['BAZ'], my_enum.BAZ)
self.assertEqual(value_dict['SubEnum']['BIM'], my_enum.SubEnum.BIM)
self.assertEqual(value_dict['SubEnum']['BEN'], my_enum.SubEnum.BEN)
self.assertTrue(isinstance(my_enum.SubEnum, EnumClass))
def testBigquerySchemaClass(self):
field1 = 'field1'
field2 = 'field2'
schema_dict = [
{
'name': 'field1',
'type': 'string',
'mode': 'nullable'
},
{
'name': 'field2',
'type': 'timestamp',
'mode': 'nullable'
},
]
my_enum = BigquerySchemaClass(schema_dict)
self.assertEqual(field1, my_enum.FIELD1)
self.assertEqual(field2, my_enum.FIELD2)
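For readers who want to see what would satisfy these assertions, here is a minimal sketch of a dict-to-attribute class. It is an illustration only, not the actual analytics_enums implementation, which this record does not show.
# Hedged sketch: a dict-backed "enum" that would pass the tests above.
class EnumClass(object):
    def __init__(self, value_dict):
        for key, value in value_dict.items():
            # Nested dicts become nested EnumClass instances, so SubEnum.BIM resolves.
            if isinstance(value, dict):
                value = EnumClass(value)
            setattr(self, key, value)
class BigquerySchemaClass(object):
    def __init__(self, schema):
        for field in schema:
            # Expose each field name as an upper-cased attribute: FIELD1 -> 'field1'.
            setattr(self, field['name'].upper(), field['name'])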
|
|
6bab44116eb343b5103b6ed23797a83bc1feedbd
|
shopify/wsgi.py
|
shopify/wsgi.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Add a WSGI application file
|
Add a WSGI application file
|
Python
|
bsd-3-clause
|
CorbanU/corban-shopify,CorbanU/corban-shopify
|
Add a WSGI application file
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
|
<commit_before><commit_msg>Add a WSGI application file<commit_after>
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Add a WSGI application fileimport os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
|
<commit_before><commit_msg>Add a WSGI application file<commit_after>import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Local")
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
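As a side note for readers new to WSGI, the application object exported here is simply a callable that a server such as gunicorn imports (for example, gunicorn shopify.wsgi:application). A hedged, standalone illustration of the callable contract using the standard library's reference server, unrelated to the Django application above:
# Hedged illustration of the WSGI callable contract; not the Django application above.
from wsgiref.simple_server import make_server
def application(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello from WSGI']
server = make_server('127.0.0.1', 8000, application)
# server.serve_forever()  # left commented out so the sketch does not block when run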
|
|
2101fd89059f73078bf5cfc8aa1399fda7ea567e
|
examples/corpus_reader.py
|
examples/corpus_reader.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
class TSVOpener:
def __init__(self, opt={}):
self.name = opt.get('name')
self.file_reader = opt.get('reader', csv)
self.delimiter = opt.get('delimeter', b'\t')
self.tsv = self.open_tsv_and_read_file()
def reader(self):
return self.tsv
def open_tsv_and_read_file(self):
return self.file_reader.reader(self.name, delimiter=self.delimiter)
def build(self):
return self.reader()
@staticmethod
def reader(options={}):
return TSVOpener(options).build
|
Add simple TSV reader for HUWiki corpus
|
Add simple TSV reader for HUWiki corpus
|
Python
|
apache-2.0
|
davidpgero/hungarian-nltk
|
Add simple TSV reader for HUWiki corpus
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
class TSVOpener:
def __init__(self, opt={}):
self.name = opt.get('name')
self.file_reader = opt.get('reader', csv)
self.delimiter = opt.get('delimeter', b'\t')
self.tsv = self.open_tsv_and_read_file()
def reader(self):
return self.tsv
def open_tsv_and_read_file(self):
return self.file_reader.reader(self.name, delimiter=self.delimiter)
def build(self):
return self.reader()
@staticmethod
def reader(options={}):
return TSVOpener(options).build
|
<commit_before><commit_msg>Add simple TSV reader for HUWiki corpus<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
class TSVOpener:
def __init__(self, opt={}):
self.name = opt.get('name')
self.file_reader = opt.get('reader', csv)
self.delimiter = opt.get('delimeter', b'\t')
self.tsv = self.open_tsv_and_read_file()
def reader(self):
return self.tsv
def open_tsv_and_read_file(self):
return self.file_reader.reader(self.name, delimiter=self.delimiter)
def build(self):
return self.reader()
@staticmethod
def reader(options={}):
return TSVOpener(options).build
|
Add simple TSV reader for HUWiki corpus#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
class TSVOpener:
def __init__(self, opt={}):
self.name = opt.get('name')
self.file_reader = opt.get('reader', csv)
self.delimiter = opt.get('delimeter', b'\t')
self.tsv = self.open_tsv_and_read_file()
def reader(self):
return self.tsv
def open_tsv_and_read_file(self):
return self.file_reader.reader(self.name, delimiter=self.delimiter)
def build(self):
return self.reader()
@staticmethod
def reader(options={}):
return TSVOpener(options).build
|
<commit_before><commit_msg>Add simple TSV reader for HUWiki corpus<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
class TSVOpener:
def __init__(self, opt={}):
self.name = opt.get('name')
self.file_reader = opt.get('reader', csv)
self.delimiter = opt.get('delimeter', b'\t')
self.tsv = self.open_tsv_and_read_file()
def reader(self):
return self.tsv
def open_tsv_and_read_file(self):
return self.file_reader.reader(self.name, delimiter=self.delimiter)
def build(self):
return self.reader()
@staticmethod
def reader(options={}):
return TSVOpener(options).build
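One caveat worth flagging: csv.reader expects an iterable of lines (typically an open file object), while the class above hands it the file name string. A hedged, Python 3 sketch of reading a tab-separated corpus file directly, with a placeholder file name:
# Hedged Python 3 sketch: plain TSV reading with the standard csv module.
import csv
def read_tsv(path):
    # Open the file first; csv.reader then iterates over its lines.
    with open(path, encoding='utf-8', newline='') as handle:
        for row in csv.reader(handle, delimiter='\t'):
            yield row
for row in read_tsv('huwiki.tsv'):  # placeholder corpus path
    print(row)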
|
|
26202bffe9f36bb35efed30d4a64e17a56c78cf9
|
scuole/districts/management/commands/dedupedistrictslugs.py
|
scuole/districts/management/commands/dedupedistrictslugs.py
|
from django.core.management.base import BaseCommand
from django.db.models import Count
from scuole.districts.models import District
class Command(BaseCommand):
help = "Dedupe District slugs by adding the county name to the end."
def handle(self, *args, **options):
duplicate_slugs = (
District.objects.values("slug")
.annotate(total=Count("slug"))
.filter(total__gt=1)
)
for duplicate in duplicate_slugs:
slug = duplicate['slug']
for district in District.objects.filter(slug=slug):
district.slug = f"{district.slug}-{district.county.slug}"
district.save()
|
Add dedupe district slug management command
|
Add dedupe district slug management command
|
Python
|
mit
|
texastribune/scuole,texastribune/scuole,texastribune/scuole,texastribune/scuole
|
Add dedupe district slug management command
|
from django.core.management.base import BaseCommand
from django.db.models import Count
from scuole.districts.models import District
class Command(BaseCommand):
help = "Dedupe District slugs by adding the county name to the end."
def handle(self, *args, **options):
duplicate_slugs = (
District.objects.values("slug")
.annotate(total=Count("slug"))
.filter(total__gt=1)
)
for duplicate in duplicate_slugs:
slug = duplicate['slug']
for district in District.objects.filter(slug=slug):
district.slug = f"{district.slug}-{district.county.slug}"
district.save()
|
<commit_before><commit_msg>Add dedupe district slug management command<commit_after>
|
from django.core.management.base import BaseCommand
from django.db.models import Count
from scuole.districts.models import District
class Command(BaseCommand):
help = "Dedupe District slugs by adding the county name to the end."
def handle(self, *args, **options):
duplicate_slugs = (
District.objects.values("slug")
.annotate(total=Count("slug"))
.filter(total__gt=1)
)
for duplicate in duplicate_slugs:
slug = duplicate['slug']
for district in District.objects.filter(slug=slug):
district.slug = f"{district.slug}-{district.county.slug}"
district.save()
|
Add dedupe district slug management commandfrom django.core.management.base import BaseCommand
from django.db.models import Count
from scuole.districts.models import District
class Command(BaseCommand):
help = "Dedupe District slugs by adding the county name to the end."
def handle(self, *args, **options):
duplicate_slugs = (
District.objects.values("slug")
.annotate(total=Count("slug"))
.filter(total__gt=1)
)
for duplicate in duplicate_slugs:
slug = duplicate['slug']
for district in District.objects.filter(slug=slug):
district.slug = f"{district.slug}-{district.county.slug}"
district.save()
|
<commit_before><commit_msg>Add dedupe district slug management command<commit_after>from django.core.management.base import BaseCommand
from django.db.models import Count
from scuole.districts.models import District
class Command(BaseCommand):
help = "Dedupe District slugs by adding the county name to the end."
def handle(self, *args, **options):
duplicate_slugs = (
District.objects.values("slug")
.annotate(total=Count("slug"))
.filter(total__gt=1)
)
for duplicate in duplicate_slugs:
slug = duplicate['slug']
for district in District.objects.filter(slug=slug):
district.slug = f"{district.slug}-{district.county.slug}"
district.save()
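The duplicate detection itself is a values()/annotate(Count) group-by. A hedged, ORM-free illustration of the same idea on plain Python data (the sample slugs are made up):
# Hedged illustration of the "find duplicated slugs" step without the ORM.
from collections import Counter
slugs = ['austin-isd', 'lincoln-isd', 'lincoln-isd']  # made-up sample data
duplicates = [slug for slug, total in Counter(slugs).items() if total > 1]
print(duplicates)  # ['lincoln-isd']; each such district gets '-<county slug>' appended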
|
|
a31461138d20673f718e0bd9fb4bb7aa9c1ff993
|
tools/check_ecs_deps/check_ecs_deps.py
|
tools/check_ecs_deps/check_ecs_deps.py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Verifies that builds of the embedded content_shell do not included
unnecessary dependencies.'''
import getopt
import os
import re
import string
import subprocess
import sys
import optparse
kUndesiredLibraryList = [
'libcairo',
'libpango',
'libglib',
]
binary_target = 'content_shell'
def _main():
parser = optparse.OptionParser(
"usage: %prog -b <dir> --target <Debug|Release>")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.build_dir != None:
target = os.path.join(options.build_dir, binary_target)
else:
target = binary_target
forbidden_regexp = re.compile(string.join(kUndesiredLibraryList, '|'))
success = 0
p = subprocess.Popen(['ldd', target], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err != '':
print "Failed to execute ldd to analyze dependencies for " + target + ':'
print ' ' + err
print "FAILED\n"
return 1
if out == '':
print "No output to scan for forbidden dependencies?\n"
print "\nFAILED\n"
return 1
success = 1
deps = string.split(out, '\n')
for d in deps:
if re.search(forbidden_regexp, d) != None:
success = 0
print "Forbidden library: " + d
if success == 1:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
return 1
if __name__ == "__main__":
# handle arguments...
# do something reasonable if not run with one...
sys.exit(_main())
|
Add a tool to validate ecs library dependencies.
|
Add a tool to validate ecs library dependencies.
The embedded content_shell starting point for applications such as the
ChromeCast shell must not include dependencies that would increase its
memory footprint. This script automatically tests that it does not contain
undesired dependencies.
BUG=none
Review URL: https://codereview.chromium.org/58603003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@234716 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
jaruba/chromium.src,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,Just-D/chromium-1,chuan9/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,ChromiumWebApps/chromium,dednal/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,dushu1203/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,ltilve/chromium,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,anirudhSK/chromium,anirudhSK/chromium,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,Just-D/chromium-1,Chilledheart/chromium,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,littlstar/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,anirudhSK/chromium,littlstar/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,dednal/chromium.src,dednal/chromium.src,Chilledheart/chromium,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,markYoungH/chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,a
xinging/chromium-crosswalk,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,ltilve/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,M4sse/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,Jonekee/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,Just-D/chromium-1,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,anirudhSK/chromium,Jonekee/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,markYoungH/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,markYoungH/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,dushu1203/chromium.src,jaruba/chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,littlstar/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,dushu1203/chromium.src,Just-D/chromium-1,ltilve/chromium,ChromiumWebApps/chromium,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,jaruba/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src
|
Add a tool to validate ecs library dependencies.
The embedded content_shell starting point for applications such as the
ChromeCast shell must not include dependencies that would increase its
memory footprint. This script automatically tests that it does not contain
undesired dependencies.
BUG=none
Review URL: https://codereview.chromium.org/58603003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@234716 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Verifies that builds of the embedded content_shell do not included
unnecessary dependencies.'''
import getopt
import os
import re
import string
import subprocess
import sys
import optparse
kUndesiredLibraryList = [
'libcairo',
'libpango',
'libglib',
]
binary_target = 'content_shell'
def _main():
parser = optparse.OptionParser(
"usage: %prog -b <dir> --target <Debug|Release>")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.build_dir != None:
target = os.path.join(options.build_dir, binary_target)
else:
target = binary_target
forbidden_regexp = re.compile(string.join(kUndesiredLibraryList, '|'))
success = 0
p = subprocess.Popen(['ldd', target], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err != '':
print "Failed to execute ldd to analyze dependencies for " + target + ':'
print ' ' + err
print "FAILED\n"
return 1
if out == '':
print "No output to scan for forbidden dependencies?\n"
print "\nFAILED\n"
return 1
success = 1
deps = string.split(out, '\n')
for d in deps:
if re.search(forbidden_regexp, d) != None:
success = 0
print "Forbidden library: " + d
if success == 1:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
return 1
if __name__ == "__main__":
# handle arguments...
# do something reasonable if not run with one...
sys.exit(_main())
|
<commit_before><commit_msg>Add a tool to validate ecs library dependencies.
The embedded content_shell starting point for applications such as the
ChromeCast shell must not include dependencies that would increase its
memory footprint. This script automatically tests that it does not contain
undesired dependencies.
BUG=none
Review URL: https://codereview.chromium.org/58603003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@234716 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Verifies that builds of the embedded content_shell do not included
unnecessary dependencies.'''
import getopt
import os
import re
import string
import subprocess
import sys
import optparse
kUndesiredLibraryList = [
'libcairo',
'libpango',
'libglib',
]
binary_target = 'content_shell'
def _main():
parser = optparse.OptionParser(
"usage: %prog -b <dir> --target <Debug|Release>")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.build_dir != None:
target = os.path.join(options.build_dir, binary_target)
else:
target = binary_target
forbidden_regexp = re.compile(string.join(kUndesiredLibraryList, '|'))
success = 0
p = subprocess.Popen(['ldd', target], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err != '':
print "Failed to execute ldd to analyze dependencies for " + target + ':'
print ' ' + err
print "FAILED\n"
return 1
if out == '':
print "No output to scan for forbidden dependencies?\n"
print "\nFAILED\n"
return 1
success = 1
deps = string.split(out, '\n')
for d in deps:
if re.search(forbidden_regexp, d) != None:
success = 0
print "Forbidden library: " + d
if success == 1:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
return 1
if __name__ == "__main__":
# handle arguments...
# do something reasonable if not run with one...
sys.exit(_main())
|
Add a tool to validate ecs library dependencies.
The embedded content_shell starting point for applications such as the
ChromeCast shell must not include dependencies that would increase its
memory footprint. This script automatically tests that it does not contain
undesired dependencies.
BUG=none
Review URL: https://codereview.chromium.org/58603003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@234716 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Verifies that builds of the embedded content_shell do not included
unnecessary dependencies.'''
import getopt
import os
import re
import string
import subprocess
import sys
import optparse
kUndesiredLibraryList = [
'libcairo',
'libpango',
'libglib',
]
binary_target = 'content_shell'
def _main():
parser = optparse.OptionParser(
"usage: %prog -b <dir> --target <Debug|Release>")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.build_dir != None:
target = os.path.join(options.build_dir, binary_target)
else:
target = binary_target
forbidden_regexp = re.compile(string.join(kUndesiredLibraryList, '|'))
success = 0
p = subprocess.Popen(['ldd', target], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err != '':
print "Failed to execute ldd to analyze dependencies for " + target + ':'
print ' ' + err
print "FAILED\n"
return 1
if out == '':
print "No output to scan for forbidden dependencies?\n"
print "\nFAILED\n"
return 1
success = 1
deps = string.split(out, '\n')
for d in deps:
if re.search(forbidden_regexp, d) != None:
success = 0
print "Forbidden library: " + d
if success == 1:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
return 1
if __name__ == "__main__":
# handle arguments...
# do something reasonable if not run with one...
sys.exit(_main())
|
<commit_before><commit_msg>Add a tool to validate ecs library dependencies.
The embedded content_shell starting point for applications such as the
ChromeCast shell must not include dependencies that would increase its
memory footprint. This script automatically tests that it does not contain
undesired dependencies.
BUG=none
Review URL: https://codereview.chromium.org/58603003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@234716 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Verifies that builds of the embedded content_shell do not included
unnecessary dependencies.'''
import getopt
import os
import re
import string
import subprocess
import sys
import optparse
kUndesiredLibraryList = [
'libcairo',
'libpango',
'libglib',
]
binary_target = 'content_shell'
def _main():
parser = optparse.OptionParser(
"usage: %prog -b <dir> --target <Debug|Release>")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.build_dir != None:
target = os.path.join(options.build_dir, binary_target)
else:
target = binary_target
forbidden_regexp = re.compile(string.join(kUndesiredLibraryList, '|'))
success = 0
p = subprocess.Popen(['ldd', target], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err != '':
print "Failed to execute ldd to analyze dependencies for " + target + ':'
print ' ' + err
print "FAILED\n"
return 1
if out == '':
print "No output to scan for forbidden dependencies?\n"
print "\nFAILED\n"
return 1
success = 1
deps = string.split(out, '\n')
for d in deps:
if re.search(forbidden_regexp, d) != None:
success = 0
print "Forbidden library: " + d
if success == 1:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
return 1
if __name__ == "__main__":
# handle arguments...
# do something reasonable if not run with one...
sys.exit(_main())
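The core of the check is a single regular-expression scan over ldd output. A condensed, Python 3 flavoured sketch of just that step, independent of the optparse plumbing above, with a placeholder binary path:
# Hedged Python 3 sketch of the forbidden-dependency scan performed by the script above.
import re
import subprocess
FORBIDDEN = ('libcairo', 'libpango', 'libglib')
def has_forbidden_deps(binary):
    result = subprocess.run(['ldd', binary], capture_output=True, text=True)
    pattern = re.compile('|'.join(FORBIDDEN))
    bad = [line.strip() for line in result.stdout.splitlines() if pattern.search(line)]
    for line in bad:
        print('Forbidden library:', line)
    return bool(bad)
if __name__ == '__main__':
    has_forbidden_deps('out/Release/content_shell')  # placeholder path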
|
|
7c249b245406198aa1c2bc38012f2ae404b757aa
|
backend/scripts/tiffconvert.py
|
backend/scripts/tiffconvert.py
|
#!/usr/bin/env python
from PIL import Image
import rethinkdb as r
from optparse import OptionParser
import sys
import os
import errno
def datafile_dir(mcdir, datafile_id):
pieces = datafile_id.split("-")
return os.path.join(mcdir, pieces[1][0:2], pieces[1][2:4])
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main():
parser = OptionParser()
parser.add_option("-d", "--directory", dest="dir",
help="MCDIR location", type="string")
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify a port for RethinkDB."
sys.exit(1)
if options.dir is None:
print "You must specify the location of the MC file repository."
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
all_files = list(r.table('datafiles').run(conn))
for datafile in all_files:
#print "File: %s" % (datafile['name'])
name, ext = os.path.splitext(datafile['name'])
ext_lower = ext.lower()
if ext_lower == ".tif" or ext_lower == ".tiff":
if datafile['usesid'] != "":
continue
filedir = datafile_dir(options.dir, datafile['id'])
image_file = os.path.join(filedir, datafile['id'])
print " Opening: %s" % (image_file)
im = Image.open(image_file)
conversion_dir = os.path.join(filedir, ".conversion")
converted_file_path = os.path.join(conversion_dir, datafile['id'] + ".jpg")
if os.path.isfile(converted_file_path):
continue
mkdirp(conversion_dir)
if im.mode != 'RGB':
im = im.convert('RGB')
print "Converting file %s, id %s" % (datafile['name'], datafile['id'])
im.save(converted_file_path)
if __name__ == "__main__":
main()
|
Add utility to batch convert tiff files.
|
Add utility to batch convert tiff files.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add utility to batch convert tiff files.
|
#!/usr/bin/env python
from PIL import Image
import rethinkdb as r
from optparse import OptionParser
import sys
import os
import errno
def datafile_dir(mcdir, datafile_id):
pieces = datafile_id.split("-")
return os.path.join(mcdir, pieces[1][0:2], pieces[1][2:4])
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main():
parser = OptionParser()
parser.add_option("-d", "--directory", dest="dir",
help="MCDIR location", type="string")
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify a port for RethinkDB."
sys.exit(1)
if options.dir is None:
print "You must specify the location of the MC file repository."
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
all_files = list(r.table('datafiles').run(conn))
for datafile in all_files:
#print "File: %s" % (datafile['name'])
name, ext = os.path.splitext(datafile['name'])
ext_lower = ext.lower()
if ext_lower == ".tif" or ext_lower == ".tiff":
if datafile['usesid'] != "":
continue
filedir = datafile_dir(options.dir, datafile['id'])
image_file = os.path.join(filedir, datafile['id'])
print " Opening: %s" % (image_file)
im = Image.open(image_file)
conversion_dir = os.path.join(filedir, ".conversion")
converted_file_path = os.path.join(conversion_dir, datafile['id'] + ".jpg")
if os.path.isfile(converted_file_path):
continue
mkdirp(conversion_dir)
if im.mode != 'RGB':
im = im.convert('RGB')
print "Converting file %s, id %s" % (datafile['name'], datafile['id'])
im.save(converted_file_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add utility to batch convert tiff files.<commit_after>
|
#!/usr/bin/env python
from PIL import Image
import rethinkdb as r
from optparse import OptionParser
import sys
import os
import errno
def datafile_dir(mcdir, datafile_id):
pieces = datafile_id.split("-")
return os.path.join(mcdir, pieces[1][0:2], pieces[1][2:4])
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main():
parser = OptionParser()
parser.add_option("-d", "--directory", dest="dir",
help="MCDIR location", type="string")
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify a port for RethinkDB."
sys.exit(1)
if options.dir is None:
print "You must specify the location of the MC file repository."
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
all_files = list(r.table('datafiles').run(conn))
for datafile in all_files:
#print "File: %s" % (datafile['name'])
name, ext = os.path.splitext(datafile['name'])
ext_lower = ext.lower()
if ext_lower == ".tif" or ext_lower == ".tiff":
if datafile['usesid'] != "":
continue
filedir = datafile_dir(options.dir, datafile['id'])
image_file = os.path.join(filedir, datafile['id'])
print " Opening: %s" % (image_file)
im = Image.open(image_file)
conversion_dir = os.path.join(filedir, ".conversion")
converted_file_path = os.path.join(conversion_dir, datafile['id'] + ".jpg")
if os.path.isfile(converted_file_path):
continue
mkdirp(conversion_dir)
if im.mode != 'RGB':
im = im.convert('RGB')
print "Converting file %s, id %s" % (datafile['name'], datafile['id'])
im.save(converted_file_path)
if __name__ == "__main__":
main()
|
Add utility to batch convert tiff files.#!/usr/bin/env python
from PIL import Image
import rethinkdb as r
from optparse import OptionParser
import sys
import os
import errno
def datafile_dir(mcdir, datafile_id):
pieces = datafile_id.split("-")
return os.path.join(mcdir, pieces[1][0:2], pieces[1][2:4])
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main():
parser = OptionParser()
parser.add_option("-d", "--directory", dest="dir",
help="MCDIR location", type="string")
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify a port for RethinkDB."
sys.exit(1)
if options.dir is None:
print "You must specify the location of the MC file repository."
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
all_files = list(r.table('datafiles').run(conn))
for datafile in all_files:
#print "File: %s" % (datafile['name'])
name, ext = os.path.splitext(datafile['name'])
ext_lower = ext.lower()
if ext_lower == ".tif" or ext_lower == ".tiff":
if datafile['usesid'] != "":
continue
filedir = datafile_dir(options.dir, datafile['id'])
image_file = os.path.join(filedir, datafile['id'])
print " Opening: %s" % (image_file)
im = Image.open(image_file)
conversion_dir = os.path.join(filedir, ".conversion")
converted_file_path = os.path.join(conversion_dir, datafile['id'] + ".jpg")
if os.path.isfile(converted_file_path):
continue
mkdirp(conversion_dir)
if im.mode != 'RGB':
im = im.convert('RGB')
print "Converting file %s, id %s" % (datafile['name'], datafile['id'])
im.save(converted_file_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add utility to batch convert tiff files.<commit_after>#!/usr/bin/env python
from PIL import Image
import rethinkdb as r
from optparse import OptionParser
import sys
import os
import errno
def datafile_dir(mcdir, datafile_id):
pieces = datafile_id.split("-")
return os.path.join(mcdir, pieces[1][0:2], pieces[1][2:4])
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def main():
parser = OptionParser()
parser.add_option("-d", "--directory", dest="dir",
help="MCDIR location", type="string")
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port")
(options, args) = parser.parse_args()
if options.port is None:
print "You must specify a port for RethinkDB."
sys.exit(1)
if options.dir is None:
print "You must specify the location of the MC file repository."
sys.exit(1)
conn = r.connect('localhost', options.port, db='materialscommons')
all_files = list(r.table('datafiles').run(conn))
for datafile in all_files:
#print "File: %s" % (datafile['name'])
name, ext = os.path.splitext(datafile['name'])
ext_lower = ext.lower()
if ext_lower == ".tif" or ext_lower == ".tiff":
if datafile['usesid'] != "":
continue
filedir = datafile_dir(options.dir, datafile['id'])
image_file = os.path.join(filedir, datafile['id'])
print " Opening: %s" % (image_file)
im = Image.open(image_file)
conversion_dir = os.path.join(filedir, ".conversion")
converted_file_path = os.path.join(conversion_dir, datafile['id'] + ".jpg")
if os.path.isfile(converted_file_path):
continue
mkdirp(conversion_dir)
if im.mode != 'RGB':
im = im.convert('RGB')
print "Converting file %s, id %s" % (datafile['name'], datafile['id'])
im.save(converted_file_path)
if __name__ == "__main__":
main()
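To make the on-disk layout concrete: datafile_dir() splits the datafile id on '-' and uses the first four characters of the second piece as two directory levels. A small standalone illustration with a made-up id:
# Hedged illustration of the datafile_dir() path scheme (the id below is made up).
import os
def datafile_dir(mcdir, datafile_id):
    pieces = datafile_id.split("-")
    return os.path.join(mcdir, pieces[1][0:2], pieces[1][2:4])
print(datafile_dir("/mcdir", "df-a1b2c3d4"))  # -> /mcdir/a1/b2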
|
|
e98fc8302cb9f8d435b197cd3fc47db23e498929
|
skvideo/examples/measure.py
|
skvideo/examples/measure.py
|
from skvideo.io import VideoCapture
from skvideo.metrics import ssim, psnr, vifp
import sys
import json
filename1, filename2 = sys.argv[1], sys.argv[2]
cap1 = VideoCapture(filename1)
cap1.open()
print str(cap1.get_info())
cap2 = VideoCapture(filename2)
cap2.open()
print str(cap2.get_info())
def rgb_to_y(img):
return 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]
frame_num = 0
while True:
retval1, image1 = cap1.read()
retval2, image2 = cap2.read()
if not retval1 and not retval2:
break
elif not retval1 or not retval2:
print "error: input files have different number of frames"
break
if image1.shape != image2.shape:
print "error: input files have different resolutions"
y_image1 = rgb_to_y(image1)
y_image2 = rgb_to_y(image2)
psnr_metric = psnr.psnr(image1, image2)
ssim_metric = ssim.ssim(y_image1 / 255.0, y_image2 / 255.0)
vifp_metric = vifp.vifp_mscale(y_image1, y_image2)
print json.dumps({ "frame_num":frame_num, "psnr":psnr_metric, "ssim":ssim_metric, "vifp":vifp_metric })
frame_num += 1
cap1.release()
cap2.release()
print "done"
|
Add example that compares two files with every metric
|
Add example that compares two files with every metric
|
Python
|
bsd-3-clause
|
aizvorski/scikit-video
|
Add example that compares two files with every metric
|
from skvideo.io import VideoCapture
from skvideo.metrics import ssim, psnr, vifp
import sys
import json
filename1, filename2 = sys.argv[1], sys.argv[2]
cap1 = VideoCapture(filename1)
cap1.open()
print str(cap1.get_info())
cap2 = VideoCapture(filename2)
cap2.open()
print str(cap2.get_info())
def rgb_to_y(img):
return 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]
frame_num = 0
while True:
retval1, image1 = cap1.read()
retval2, image2 = cap2.read()
if not retval1 and not retval2:
break
elif not retval1 or not retval2:
print "error: input files have different number of frames"
break
if image1.shape != image2.shape:
print "error: input files have different resolutions"
y_image1 = rgb_to_y(image1)
y_image2 = rgb_to_y(image2)
psnr_metric = psnr.psnr(image1, image2)
ssim_metric = ssim.ssim(y_image1 / 255.0, y_image2 / 255.0)
vifp_metric = vifp.vifp_mscale(y_image1, y_image2)
print json.dumps({ "frame_num":frame_num, "psnr":psnr_metric, "ssim":ssim_metric, "vifp":vifp_metric })
frame_num += 1
cap1.release()
cap2.release()
print "done"
|
<commit_before><commit_msg>Add example that compares two files with every metric<commit_after>
|
from skvideo.io import VideoCapture
from skvideo.metrics import ssim, psnr, vifp
import sys
import json
filename1, filename2 = sys.argv[1], sys.argv[2]
cap1 = VideoCapture(filename1)
cap1.open()
print str(cap1.get_info())
cap2 = VideoCapture(filename2)
cap2.open()
print str(cap2.get_info())
def rgb_to_y(img):
return 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]
frame_num = 0
while True:
retval1, image1 = cap1.read()
retval2, image2 = cap2.read()
if not retval1 and not retval2:
break
elif not retval1 or not retval2:
print "error: input files have different number of frames"
break
if image1.shape != image2.shape:
print "error: input files have different resolutions"
y_image1 = rgb_to_y(image1)
y_image2 = rgb_to_y(image2)
psnr_metric = psnr.psnr(image1, image2)
ssim_metric = ssim.ssim(y_image1 / 255.0, y_image2 / 255.0)
vifp_metric = vifp.vifp_mscale(y_image1, y_image2)
print json.dumps({ "frame_num":frame_num, "psnr":psnr_metric, "ssim":ssim_metric, "vifp":vifp_metric })
frame_num += 1
cap1.release()
cap2.release()
print "done"
|
Add example that compares two files with every metricfrom skvideo.io import VideoCapture
from skvideo.metrics import ssim, psnr, vifp
import sys
import json
filename1, filename2 = sys.argv[1], sys.argv[2]
cap1 = VideoCapture(filename1)
cap1.open()
print str(cap1.get_info())
cap2 = VideoCapture(filename2)
cap2.open()
print str(cap2.get_info())
def rgb_to_y(img):
return 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]
frame_num = 0
while True:
retval1, image1 = cap1.read()
retval2, image2 = cap2.read()
if not retval1 and not retval2:
break
elif not retval1 or not retval2:
print "error: input files have different number of frames"
break
if image1.shape != image2.shape:
print "error: input files have different resolutions"
y_image1 = rgb_to_y(image1)
y_image2 = rgb_to_y(image2)
psnr_metric = psnr.psnr(image1, image2)
ssim_metric = ssim.ssim(y_image1 / 255.0, y_image2 / 255.0)
vifp_metric = vifp.vifp_mscale(y_image1, y_image2)
print json.dumps({ "frame_num":frame_num, "psnr":psnr_metric, "ssim":ssim_metric, "vifp":vifp_metric })
frame_num += 1
cap1.release()
cap2.release()
print "done"
|
<commit_before><commit_msg>Add example that compares two files with every metric<commit_after>from skvideo.io import VideoCapture
from skvideo.metrics import ssim, psnr, vifp
import sys
import json
filename1, filename2 = sys.argv[1], sys.argv[2]
cap1 = VideoCapture(filename1)
cap1.open()
print str(cap1.get_info())
cap2 = VideoCapture(filename2)
cap2.open()
print str(cap2.get_info())
def rgb_to_y(img):
return 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]
frame_num = 0
while True:
retval1, image1 = cap1.read()
retval2, image2 = cap2.read()
if not retval1 and not retval2:
break
elif not retval1 or not retval2:
print "error: input files have different number of frames"
break
if image1.shape != image2.shape:
print "error: input files have different resolutions"
y_image1 = rgb_to_y(image1)
y_image2 = rgb_to_y(image2)
psnr_metric = psnr.psnr(image1, image2)
ssim_metric = ssim.ssim(y_image1 / 255.0, y_image2 / 255.0)
vifp_metric = vifp.vifp_mscale(y_image1, y_image2)
print json.dumps({ "frame_num":frame_num, "psnr":psnr_metric, "ssim":ssim_metric, "vifp":vifp_metric })
frame_num += 1
cap1.release()
cap2.release()
print "done"
|
|
27810a25f7d11d95fac92138ad8c9af24cb65304
|
scripts/data_download/higher_education/create_all_files.py
|
scripts/data_download/higher_education/create_all_files.py
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/higher_education/create_files.py en/pt output_path\n"
exit()
for year in range(2009, 2015):
print "python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year)
# commands.getoutput("python scripts/data_download/higher_education/create_files.py "+sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3])
|
Add download all year to higher education.
|
Add download all year to higher education.
|
Python
|
mit
|
DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site
|
Add download all year to higher education.
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/higher_education/create_files.py en/pt output_path\n"
exit()
for year in range(2009, 2015):
print "python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year)
# commands.getoutput("python scripts/data_download/higher_education/create_files.py "+sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3])
|
<commit_before><commit_msg>Add download all year to higher education.<commit_after>
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/higher_education/create_files.py en/pt output_path\n"
exit()
for year in range(2009, 2015):
print "python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year)
# commands.getoutput("python scripts/data_download/higher_education/create_files.py "+sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3])
|
Add download all year to higher education.import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/higher_education/create_files.py en/pt output_path\n"
exit()
for year in range(2009, 2015):
print "python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year)
# commands.getoutput("python scripts/data_download/higher_education/create_files.py "+sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3])
|
<commit_before><commit_msg>Add download all year to higher education.<commit_after>import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/higher_education/create_files.py en/pt output_path\n"
exit()
for year in range(2009, 2015):
print "python scripts/data_download/higher_education/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year)
# commands.getoutput("python scripts/data_download/higher_education/create_files.py "+sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3])
|
|
11fc2b74ca8df0475f3d2a42be0a4c30d865f2e8
|
instance/eucalyptus_upgrade.py
|
instance/eucalyptus_upgrade.py
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper function to upgrade production instances from Dogwood to Eucalyptus.
"""
import logging
from instance.models.openedx_instance import OpenEdXInstance
from instance.tasks import spawn_appserver
logger = logging.getLogger(__name__)
def get_instances_to_upgrade():
"""Select instances that need to be upgraded to Eucalyptus."""
return OpenEdXInstance.objects.filter( # Select instances
active_appserver___status="running", # that are running
openedx_release__contains="dogwood", # on dogwood
use_ephemeral_databases=False, # and use persistent databases.
)
def upgrade_to_eucalyptus(instances):
"""Upgrade all OpenEdXInstances in the given iterable to Eucalyptus."""
for instance in instances:
instance.refresh_from_db()
logger.info("Upgrading instance %s to Eucalyptus...", instance)
instance.configuration_extra_settings += "\nCOMMON_EUCALYPTUS_UPGRADE: true\n"
instance.edx_platform_repository_url = "https://github.com/open-craft/edx-platform"
instance.edx_platform_commit = "opencraft-release/eucalyptus.1"
instance.configuration_source_repo_url = "https://github.com/open-craft/configuration"
instance.configuration_version = "opencraft-release/eucalyptus.1"
instance.openedx_release = "open-release/eucalyptus.1"
instance.save()
for appserver in instance.appserver_set.iterator():
appserver.terminate_vm()
spawn_appserver(instance.ref.pk, mark_active_on_success=True, num_attempts=1)
def clean_up_after_upgrade(instances):
"""Remove Eucalyptus upgrade flag from the instance configuration."""
for instance in instances:
instance.refresh_from_db()
instance.configuration_extra_settings = instance.configuration_extra_settings.replace(
"\nCOMMON_EUCALYPTUS_UPGRADE: true\n", ""
)
instance.save()
|
Add helper functions to upgrade production instances to Eucalyptus.
|
Add helper functions to upgrade production instances to Eucalyptus.
|
Python
|
agpl-3.0
|
open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,open-craft/opencraft
|
Add helper functions to upgrade production instances to Eucalyptus.
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper function to upgrade production instances from Dogwood to Eucalyptus.
"""
import logging
from instance.models.openedx_instance import OpenEdXInstance
from instance.tasks import spawn_appserver
logger = logging.getLogger(__name__)
def get_instances_to_upgrade():
"""Select instances that need to be upgraded to Eucalyptus."""
return OpenEdXInstance.objects.filter( # Select instances
active_appserver___status="running", # that are running
openedx_release__contains="dogwood", # on dogwood
use_ephemeral_databases=False, # and use persistent databases.
)
def upgrade_to_eucalyptus(instances):
"""Upgrade all OpenEdXInstances in the given iterable to Eucalyptus."""
for instance in instances:
instance.refresh_from_db()
logger.info("Upgrading instance %s to Eucalyptus...", instance)
instance.configuration_extra_settings += "\nCOMMON_EUCALYPTUS_UPGRADE: true\n"
instance.edx_platform_repository_url = "https://github.com/open-craft/edx-platform"
instance.edx_platform_commit = "opencraft-release/eucalyptus.1"
instance.configuration_source_repo_url = "https://github.com/open-craft/configuration"
instance.configuration_version = "opencraft-release/eucalyptus.1"
instance.openedx_release = "open-release/eucalyptus.1"
instance.save()
for appserver in instance.appserver_set.iterator():
appserver.terminate_vm()
spawn_appserver(instance.ref.pk, mark_active_on_success=True, num_attempts=1)
def clean_up_after_upgrade(instances):
"""Remove Eucalyptus upgrade flag from the instance configuration."""
for instance in instances:
instance.refresh_from_db()
instance.configuration_extra_settings = instance.configuration_extra_settings.replace(
"\nCOMMON_EUCALYPTUS_UPGRADE: true\n", ""
)
instance.save()
|
<commit_before><commit_msg>Add helper functions to upgrade production instances to Eucalyptus.<commit_after>
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper function to upgrade production instances from Dogwood to Eucalyptus.
"""
import logging
from instance.models.openedx_instance import OpenEdXInstance
from instance.tasks import spawn_appserver
logger = logging.getLogger(__name__)
def get_instances_to_upgrade():
"""Select instances that need to be upgraded to Eucalyptus."""
return OpenEdXInstance.objects.filter( # Select instances
active_appserver___status="running", # that are running
openedx_release__contains="dogwood", # on dogwood
use_ephemeral_databases=False, # and use persistent databases.
)
def upgrade_to_eucalyptus(instances):
"""Upgrade all OpenEdXInstances in the given iterable to Eucalyptus."""
for instance in instances:
instance.refresh_from_db()
logger.info("Upgrading instance %s to Eucalyptus...", instance)
instance.configuration_extra_settings += "\nCOMMON_EUCALYPTUS_UPGRADE: true\n"
instance.edx_platform_repository_url = "https://github.com/open-craft/edx-platform"
instance.edx_platform_commit = "opencraft-release/eucalyptus.1"
instance.configuration_source_repo_url = "https://github.com/open-craft/configuration"
instance.configuration_version = "opencraft-release/eucalyptus.1"
instance.openedx_release = "open-release/eucalyptus.1"
instance.save()
for appserver in instance.appserver_set.iterator():
appserver.terminate_vm()
spawn_appserver(instance.ref.pk, mark_active_on_success=True, num_attempts=1)
def clean_up_after_upgrade(instances):
"""Remove Eucalyptus upgrade flag from the instance configuration."""
for instance in instances:
instance.refresh_from_db()
instance.configuration_extra_settings = instance.configuration_extra_settings.replace(
"\nCOMMON_EUCALYPTUS_UPGRADE: true\n", ""
)
instance.save()
|
Add helper functions to upgrade production instances to Eucalyptus.
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper function to upgrade production instances from Dogwood to Eucalyptus.
"""
import logging
from instance.models.openedx_instance import OpenEdXInstance
from instance.tasks import spawn_appserver
logger = logging.getLogger(__name__)
def get_instances_to_upgrade():
"""Select instances that need to be upgraded to Eucalyptus."""
return OpenEdXInstance.objects.filter( # Select instances
active_appserver___status="running", # that are running
openedx_release__contains="dogwood", # on dogwood
use_ephemeral_databases=False, # and use persistent databases.
)
def upgrade_to_eucalyptus(instances):
"""Upgrade all OpenEdXInstances in the given iterable to Eucalyptus."""
for instance in instances:
instance.refresh_from_db()
logger.info("Upgrading instance %s to Eucalyptus...", instance)
instance.configuration_extra_settings += "\nCOMMON_EUCALYPTUS_UPGRADE: true\n"
instance.edx_platform_repository_url = "https://github.com/open-craft/edx-platform"
instance.edx_platform_commit = "opencraft-release/eucalyptus.1"
instance.configuration_source_repo_url = "https://github.com/open-craft/configuration"
instance.configuration_version = "opencraft-release/eucalyptus.1"
instance.openedx_release = "open-release/eucalyptus.1"
instance.save()
for appserver in instance.appserver_set.iterator():
appserver.terminate_vm()
spawn_appserver(instance.ref.pk, mark_active_on_success=True, num_attempts=1)
def clean_up_after_upgrade(instances):
"""Remove Eucalyptus upgrade flag from the instance configuration."""
for instance in instances:
instance.refresh_from_db()
instance.configuration_extra_settings = instance.configuration_extra_settings.replace(
"\nCOMMON_EUCALYPTUS_UPGRADE: true\n", ""
)
instance.save()
|
<commit_before><commit_msg>Add helper functions to upgrade production instances to Eucalyptus.<commit_after># -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helper function to upgrade production instances from Dogwood to Eucalyptus.
"""
import logging
from instance.models.openedx_instance import OpenEdXInstance
from instance.tasks import spawn_appserver
logger = logging.getLogger(__name__)
def get_instances_to_upgrade():
"""Select instances that need to be upgraded to Eucalyptus."""
return OpenEdXInstance.objects.filter( # Select instances
active_appserver___status="running", # that are running
openedx_release__contains="dogwood", # on dogwood
use_ephemeral_databases=False, # and use persistent databases.
)
def upgrade_to_eucalyptus(instances):
"""Upgrade all OpenEdXInstances in the given iterable to Eucalyptus."""
for instance in instances:
instance.refresh_from_db()
logger.info("Upgrading instance %s to Eucalyptus...", instance)
instance.configuration_extra_settings += "\nCOMMON_EUCALYPTUS_UPGRADE: true\n"
instance.edx_platform_repository_url = "https://github.com/open-craft/edx-platform"
instance.edx_platform_commit = "opencraft-release/eucalyptus.1"
instance.configuration_source_repo_url = "https://github.com/open-craft/configuration"
instance.configuration_version = "opencraft-release/eucalyptus.1"
instance.openedx_release = "open-release/eucalyptus.1"
instance.save()
for appserver in instance.appserver_set.iterator():
appserver.terminate_vm()
spawn_appserver(instance.ref.pk, mark_active_on_success=True, num_attempts=1)
def clean_up_after_upgrade(instances):
"""Remove Eucalyptus upgrade flag from the instance configuration."""
for instance in instances:
instance.refresh_from_db()
instance.configuration_extra_settings = instance.configuration_extra_settings.replace(
"\nCOMMON_EUCALYPTUS_UPGRADE: true\n", ""
)
instance.save()
|
|
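A minimal sketch of how the three helpers above could be chained, assuming it runs in the project's Django shell where instance.eucalyptus_upgrade is importable and the database is configured; the call order (upgrade first, clean up once the new app servers are healthy) is an assumption, not part of the commit.

from instance.eucalyptus_upgrade import (
    get_instances_to_upgrade,
    upgrade_to_eucalyptus,
    clean_up_after_upgrade,
)

instances = list(get_instances_to_upgrade())   # snapshot the queryset once
upgrade_to_eucalyptus(instances)               # terminates old VMs, spawns Eucalyptus app servers
# ...after the new app servers are confirmed healthy:
clean_up_after_upgrade(instances)              # removes the COMMON_EUCALYPTUS_UPGRADE flag again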
e8343a21ba302e986a51673b42cda054cfb2e32a
|
shivyc.py
|
shivyc.py
|
#!/usr/bin/env python3
"""Main executable for ShivyC compiler
"""
import argparse
def get_arguments():
"""Set up the argument parser and return an object storing the
argument values.
return - An object storing argument values, as returned by
argparse.parse_args()
"""
parser = argparse.ArgumentParser(description="Compile C files.")
# The C file to compile
parser.add_argument("file_name")
return parser.parse_args()
if __name__ == "__main__":
arguments = get_arguments()
|
Add argument parsing to main executable
|
Add argument parsing to main executable
|
Python
|
mit
|
ShivamSarodia/ShivyC,ShivamSarodia/ShivyC,ShivamSarodia/ShivyC
|
Add argument parsing to main executable
|
#!/usr/bin/env python3
"""Main executable for ShivyC compiler
"""
import argparse
def get_arguments():
"""Set up the argument parser and return an object storing the
argument values.
return - An object storing argument values, as returned by
argparse.parse_args()
"""
parser = argparse.ArgumentParser(description="Compile C files.")
# The C file to compile
parser.add_argument("file_name")
return parser.parse_args()
if __name__ == "__main__":
arguments = get_arguments()
|
<commit_before><commit_msg>Add argument parsing to main executable<commit_after>
|
#!/usr/bin/env python3
"""Main executable for ShivyC compiler
"""
import argparse
def get_arguments():
"""Set up the argument parser and return an object storing the
argument values.
return - An object storing argument values, as returned by
argparse.parse_args()
"""
parser = argparse.ArgumentParser(description="Compile C files.")
# The C file to compile
parser.add_argument("file_name")
return parser.parse_args()
if __name__ == "__main__":
arguments = get_arguments()
|
Add argument parsing to main executable
#!/usr/bin/env python3
"""Main executable for ShivyC compiler
"""
import argparse
def get_arguments():
"""Set up the argument parser and return an object storing the
argument values.
return - An object storing argument values, as returned by
argparse.parse_args()
"""
parser = argparse.ArgumentParser(description="Compile C files.")
# The C file to compile
parser.add_argument("file_name")
return parser.parse_args()
if __name__ == "__main__":
arguments = get_arguments()
|
<commit_before><commit_msg>Add argument parsing to main executable<commit_after>#!/usr/bin/env python3
"""Main executable for ShivyC compiler
"""
import argparse
def get_arguments():
"""Set up the argument parser and return an object storing the
argument values.
return - An object storing argument values, as returned by
argparse.parse_args()
"""
parser = argparse.ArgumentParser(description="Compile C files.")
# The C file to compile
parser.add_argument("file_name")
return parser.parse_args()
if __name__ == "__main__":
arguments = get_arguments()
|
|
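A small usage sketch for the parser above, assuming shivyc.py is importable; the sys.argv assignment stands in for running `python3 shivyc.py hello.c` from a shell, and hello.c is a made-up file name.

import sys
from shivyc import get_arguments   # assumes shivyc.py is on the import path

sys.argv = ["shivyc.py", "hello.c"]   # simulate: $ python3 shivyc.py hello.c
arguments = get_arguments()
print(arguments.file_name)            # -> hello.c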
8f74688fdf73c0a61b30083a616aa5c097854e42
|
src/python/site/remove_arg.py
|
src/python/site/remove_arg.py
|
import sys
program, first, *rest = sys.argv
args = []
result = 1
for arg in rest:
if arg == first:
result = 0
else:
quoted = f'"{arg}"'
args.append(quoted)
print(" ".join(args))
sys.exit(result)
|
Add python script to remove an arg
|
Add python script to remove an arg
|
Python
|
mit
|
jalanb/jab,jalanb/jab
|
Add python script to remove an arg
|
import sys
program, first, *rest = sys.argv
args = []
result = 1
for arg in rest:
if arg == first:
result = 0
else:
quoted = f'"{arg}"'
args.append(quoted)
print(" ".join(args))
sys.exit(result)
|
<commit_before><commit_msg>Add python script to remove an arg<commit_after>
|
import sys
program, first, *rest = sys.argv
args = []
result = 1
for arg in rest:
if arg == first:
result = 0
else:
quoted = f'"{arg}"'
args.append(quoted)
print(" ".join(args))
sys.exit(result)
|
Add python script to remove an arg
import sys
program, first, *rest = sys.argv
args = []
result = 1
for arg in rest:
if arg == first:
result = 0
else:
quoted = f'"{arg}"'
args.append(quoted)
print(" ".join(args))
sys.exit(result)
|
<commit_before><commit_msg>Add python script to remove an arg<commit_after>import sys
program, first, *rest = sys.argv
args = []
result = 1
for arg in rest:
if arg == first:
result = 0
else:
quoted = f'"{arg}"'
args.append(quoted)
print(" ".join(args))
sys.exit(result)
|
|
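A hedged usage sketch of the script above, driven through subprocess (Python 3.7+ assumed for capture_output); the script path comes from the commit, while the argument values are invented for illustration.

import subprocess
import sys

proc = subprocess.run(
    [sys.executable, "src/python/site/remove_arg.py", "--verbose", "--verbose", "input.txt"],
    capture_output=True, text=True,
)
print(proc.stdout.strip())   # prints "input.txt" (re-quoted) – the args that survived
print(proc.returncode)       # 0 because "--verbose" was found and removed, 1 otherwise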
05d04680f3a25a86fa9d8ac3c232d8dfb538d67e
|
iscc_bench/utils.py
|
iscc_bench/utils.py
|
# -*- coding: utf-8 -*-
import io
import time
from functools import lru_cache
def timing(func):
"""Decorator to measure and print runtime of a function"""
def wrap(*args):
start = time.time()
ret = func(*args)
end = time.time()
print(f"{func.__name__} function took {(end - start)*1000.0:.3f} ms")
return ret
return wrap
@lru_cache(maxsize=500)
def load_text_file(fp):
return open(fp, "r", encoding="utf8").read()
def stream_binary(f):
"""
Create a data stream from a file path (str), raw bytes, or stream.
"""
if isinstance(f, str):
return open(f, "rb")
if isinstance(f, bytes):
return io.BytesIO(f)
if hasattr(f, "read"):
if hasattr(f, "seek"):
f.seek(0)
return f
|
Add module with some generic utility functions
|
Add module with some generic utility functions
|
Python
|
bsd-2-clause
|
coblo/isccbench
|
Add module with some generic utility functions
|
# -*- coding: utf-8 -*-
import io
import time
from functools import lru_cache
def timing(func):
"""Decorator to measure and print runtime of a function"""
def wrap(*args):
start = time.time()
ret = func(*args)
end = time.time()
print(f"{func.__name__} function took {(end - start)*1000.0:.3f} ms")
return ret
return wrap
@lru_cache(maxsize=500)
def load_text_file(fp):
return open(fp, "r", encoding="utf8").read()
def stream_binary(f):
"""
Create a data stream from a file path (str), raw bytes, or stream.
"""
if isinstance(f, str):
return open(f, "rb")
if isinstance(f, bytes):
return io.BytesIO(f)
if hasattr(f, "read"):
if hasattr(f, "seek"):
f.seek(0)
return f
|
<commit_before><commit_msg>Add module with some generic utility functions<commit_after>
|
# -*- coding: utf-8 -*-
import io
import time
from functools import lru_cache
def timing(func):
"""Decorator to measure and print runtime of a function"""
def wrap(*args):
start = time.time()
ret = func(*args)
end = time.time()
print(f"{func.__name__} function took {(end - start)*1000.0:.3f} ms")
return ret
return wrap
@lru_cache(maxsize=500)
def load_text_file(fp):
return open(fp, "r", encoding="utf8").read()
def stream_binary(f):
"""
Create a data stream from a file path (str), raw bytes, or stream.
"""
if isinstance(f, str):
return open(f, "rb")
if isinstance(f, bytes):
return io.BytesIO(f)
if hasattr(f, "read"):
if hasattr(f, "seek"):
f.seek(0)
return f
|
Add module with some generic utility functions
# -*- coding: utf-8 -*-
import io
import time
from functools import lru_cache
def timing(func):
"""Decorator to measure and print runtime of a function"""
def wrap(*args):
start = time.time()
ret = func(*args)
end = time.time()
print(f"{func.__name__} function took {(end - start)*1000.0:.3f} ms")
return ret
return wrap
@lru_cache(maxsize=500)
def load_text_file(fp):
return open(fp, "r", encoding="utf8").read()
def stream_binary(f):
"""
Create a data stream from a file path (str), raw bytes, or stream.
"""
if isinstance(f, str):
return open(f, "rb")
if isinstance(f, bytes):
return io.BytesIO(f)
if hasattr(f, "read"):
if hasattr(f, "seek"):
f.seek(0)
return f
|
<commit_before><commit_msg>Add module with some generic utility functions<commit_after># -*- coding: utf-8 -*-
import io
import time
from functools import lru_cache
def timing(func):
"""Decorator to measure and print runtime of a function"""
def wrap(*args):
start = time.time()
ret = func(*args)
end = time.time()
print(f"{func.__name__} function took {(end - start)*1000.0:.3f} ms")
return ret
return wrap
@lru_cache(maxsize=500)
def load_text_file(fp):
return open(fp, "r", encoding="utf8").read()
def stream_binary(f):
"""
Create a data stream from a file path (str), raw bytes, or stream.
"""
if isinstance(f, str):
return open(f, "rb")
if isinstance(f, bytes):
return io.BytesIO(f)
if hasattr(f, "read"):
if hasattr(f, "seek"):
f.seek(0)
return f
|
|
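A short sketch exercising the decorator and the stream helper above, assuming the package imports as iscc_bench; the toy checksum function is invented purely to give the timer something to measure.

from iscc_bench.utils import timing, stream_binary

@timing
def checksum(data):
    stream = stream_binary(data)   # accepts a file path, raw bytes, or an open stream
    return sum(stream.read())      # toy workload

print(checksum(b"hello world"))    # also prints e.g. "checksum function took 0.010 ms"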
f74051f18dc0ee76f8add85978893a1a42324c40
|
emotools/plays.py
|
emotools/plays.py
|
"""Functions to generate data about entities that can be used for
visualizations.
"""
from collections import Counter
import numpy as np
import re
def get_characters(speakerturns):
"""Return a list of characters based a list of speaker turns."""
characters = Counter()
for turn in speakerturns:
# more postprocessing required for character names (character names
# now sometimes include stage directions)
actor_string = turn['actor']
actor = extract_character_name(actor_string)
characters[actor] += 1
return characters
def extract_character_name(actor_str):
"""Returns the character name extracted from the input string."""
actor_str = actor_str.replace('(', '').replace(')', '')
actor_str = actor_str.replace('[', '').replace(']', '')
actor_str = actor_str.replace('van binnen', '')
parts = re.split('[.,]', actor_str)
return parts[0].strip()
def moving_average(a, n=3):
"""Calculate the moving average of array a and window size n."""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def add_leading_and_trailing_zeros(a, n):
"""Return array a with n/2 leading and trailing zeros."""
zeros = [0] * (n/2)
res = np.append(zeros, a)
return np.append(res, zeros)
def r(a1, a2):
"""Calculate Jisk's r measure (not sure it makes sense."""
res = []
for i in range(len(a1)):
if not a1[i] == 0.0 or not a2[i] == 0:
res.append((a1[i]-a2[i])/(a1[i]+a2[i]))
else:
res.append(0.0)
return np.array(res)
def generate_tick_marks(speakerturns):
"""Generate tick marks for a list of speaker turns. Returns a tuple of two
lists of tick marks; one for acts and one for scenes.
"""
# TODO: also return labels for the tick marks
act_marks = []
scene_marks = []
current_act_id = ''
current_scene_id = ''
for turn in speakerturns:
scene_id = turn.parent.get('xml:id')
# The first scene of an act might not be marked as such in FoLiA (the
# parent of a speaker turn might be either an act or a scene).
if turn.parent.get('class') == 'scene':
act_id = turn.parent.parent.get('xml:id')
else:
act_id = scene_id
if not act_id == current_act_id:
act_marks.append((speakerturns.index(turn)+1))
current_act_id = act_id
if not scene_id == current_scene_id:
scene_marks.append((speakerturns.index(turn)+1))
current_scene_id = scene_id
return act_marks, scene_marks
|
Move entity extraction functions to emotools
|
Move entity extraction functions to emotools
The entity extraction functions defined in the generate_liwc_data.py
script were moved to the emotools module, because I want to use these
functions in the visualization app. The next step is to update the
generate_liwc_data script to reflect these changes.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Move entity extraction functions to emotools
The entity extraction functions defined in the generate_liwc_data.py
script were moved to the emotools module, because I want to use these
functions in the visualization app. The next step is to update the
generate_liwc_data script to reflect these changes.
|
"""Functions to generate data about entities that can be used for
visualizations.
"""
from collections import Counter
import numpy as np
import re
def get_characters(speakerturns):
"""Return a list of characters based a list of speaker turns."""
characters = Counter()
for turn in speakerturns:
# more postprocessing required for character names (character names
# now sometimes include stage directions)
actor_string = turn['actor']
actor = extract_character_name(actor_string)
characters[actor] += 1
return characters
def extract_character_name(actor_str):
"""Returns the character name extracted from the input string."""
actor_str = actor_str.replace('(', '').replace(')', '')
actor_str = actor_str.replace('[', '').replace(']', '')
actor_str = actor_str.replace('van binnen', '')
parts = re.split('[.,]', actor_str)
return parts[0].strip()
def moving_average(a, n=3):
"""Calculate the moving average of array a and window size n."""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def add_leading_and_trailing_zeros(a, n):
"""Return array a with n/2 leading and trailing zeros."""
zeros = [0] * (n/2)
res = np.append(zeros, a)
return np.append(res, zeros)
def r(a1, a2):
"""Calculate Jisk's r measure (not sure it makes sense."""
res = []
for i in range(len(a1)):
if not a1[i] == 0.0 or not a2[i] == 0:
res.append((a1[i]-a2[i])/(a1[i]+a2[i]))
else:
res.append(0.0)
return np.array(res)
def generate_tick_marks(speakerturns):
"""Generate tick marks for a list of speaker turns. Returns a tuple of two
lists of tick marks; one for acts and one for scenes.
"""
# TODO: also return labels for the tick marks
act_marks = []
scene_marks = []
current_act_id = ''
current_scene_id = ''
for turn in speakerturns:
scene_id = turn.parent.get('xml:id')
# The first scene of an act might not be marked as such in FoLiA (the
# parent of a speaker turn might be either an act or a scene).
if turn.parent.get('class') == 'scene':
act_id = turn.parent.parent.get('xml:id')
else:
act_id = scene_id
if not act_id == current_act_id:
act_marks.append((speakerturns.index(turn)+1))
current_act_id = act_id
if not scene_id == current_scene_id:
scene_marks.append((speakerturns.index(turn)+1))
current_scene_id = scene_id
return act_marks, scene_marks
|
<commit_before><commit_msg>Move entity extraction functions to emotools
The entity extraction functions defined in the generate_liwc_data.py
script were moved to the emotools module, because I want to use these
functions in the visualization app. The next step is to update the
generate_liwc_data script to reflect these changes.<commit_after>
|
"""Functions to generate data about entities that can be used for
visualizations.
"""
from collections import Counter
import numpy as np
import re
def get_characters(speakerturns):
"""Return a list of characters based a list of speaker turns."""
characters = Counter()
for turn in speakerturns:
# more postprocessing required for character names (character names
# now sometimes include stage directions)
actor_string = turn['actor']
actor = extract_character_name(actor_string)
characters[actor] += 1
return characters
def extract_character_name(actor_str):
"""Returns the character name extracted from the input string."""
actor_str = actor_str.replace('(', '').replace(')', '')
actor_str = actor_str.replace('[', '').replace(']', '')
actor_str = actor_str.replace('van binnen', '')
parts = re.split('[.,]', actor_str)
return parts[0].strip()
def moving_average(a, n=3):
"""Calculate the moving average of array a and window size n."""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def add_leading_and_trailing_zeros(a, n):
"""Return array a with n/2 leading and trailing zeros."""
zeros = [0] * (n/2)
res = np.append(zeros, a)
return np.append(res, zeros)
def r(a1, a2):
"""Calculate Jisk's r measure (not sure it makes sense."""
res = []
for i in range(len(a1)):
if not a1[i] == 0.0 or not a2[i] == 0:
res.append((a1[i]-a2[i])/(a1[i]+a2[i]))
else:
res.append(0.0)
return np.array(res)
def generate_tick_marks(speakerturns):
"""Generate tick marks for a list of speaker turns. Returns a tuple of two
lists of tick marks; one for acts and one for scenes.
"""
# TODO: also return labels for the tick marks
act_marks = []
scene_marks = []
current_act_id = ''
current_scene_id = ''
for turn in speakerturns:
scene_id = turn.parent.get('xml:id')
# The first scene of an act might not be marked as such in FoLiA (the
# parent of a speaker turn might be either an act or a scene).
if turn.parent.get('class') == 'scene':
act_id = turn.parent.parent.get('xml:id')
else:
act_id = scene_id
if not act_id == current_act_id:
act_marks.append((speakerturns.index(turn)+1))
current_act_id = act_id
if not scene_id == current_scene_id:
scene_marks.append((speakerturns.index(turn)+1))
current_scene_id = scene_id
return act_marks, scene_marks
|
Move entity extraction functions to emotools
The entity extraction functions defined in the generate_liwc_data.py
script were moved to the emotools module, because I want to use these
functions in the visualization app. The next step is to update the
generate_liwc_data script to reflect these changes.
"""Functions to generate data about entities that can be used for
visualizations.
"""
from collections import Counter
import numpy as np
import re
def get_characters(speakerturns):
"""Return a list of characters based a list of speaker turns."""
characters = Counter()
for turn in speakerturns:
# more postprocessing required for character names (character names
# now sometimes include stage directions)
actor_string = turn['actor']
actor = extract_character_name(actor_string)
characters[actor] += 1
return characters
def extract_character_name(actor_str):
"""Returns the character name extracted from the input string."""
actor_str = actor_str.replace('(', '').replace(')', '')
actor_str = actor_str.replace('[', '').replace(']', '')
actor_str = actor_str.replace('van binnen', '')
parts = re.split('[.,]', actor_str)
return parts[0].strip()
def moving_average(a, n=3):
"""Calculate the moving average of array a and window size n."""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def add_leading_and_trailing_zeros(a, n):
"""Return array a with n/2 leading and trailing zeros."""
zeros = [0] * (n/2)
res = np.append(zeros, a)
return np.append(res, zeros)
def r(a1, a2):
"""Calculate Jisk's r measure (not sure it makes sense."""
res = []
for i in range(len(a1)):
if not a1[i] == 0.0 or not a2[i] == 0:
res.append((a1[i]-a2[i])/(a1[i]+a2[i]))
else:
res.append(0.0)
return np.array(res)
def generate_tick_marks(speakerturns):
"""Generate tick marks for a list of speaker turns. Returns a tuple of two
lists of tick marks; one for acts and one for scenes.
"""
# TODO: also return labels for the tick marks
act_marks = []
scene_marks = []
current_act_id = ''
current_scene_id = ''
for turn in speakerturns:
scene_id = turn.parent.get('xml:id')
# The first scene of an act might not be marked as such in FoLiA (the
# parent of a speaker turn might be either an act or a scene).
if turn.parent.get('class') == 'scene':
act_id = turn.parent.parent.get('xml:id')
else:
act_id = scene_id
if not act_id == current_act_id:
act_marks.append((speakerturns.index(turn)+1))
current_act_id = act_id
if not scene_id == current_scene_id:
scene_marks.append((speakerturns.index(turn)+1))
current_scene_id = scene_id
return act_marks, scene_marks
|
<commit_before><commit_msg>Move entity extraction functions to emotools
The entity extraction functions defined in the generate_liwc_data.py
script were moved to the emotools module, because I want to use these
functions in the visualization app. The next step is to update the
generate_liwc_data script to reflect these changes.<commit_after>"""Functions to generate data about entities that can be used for
visualizations.
"""
from collections import Counter
import numpy as np
import re
def get_characters(speakerturns):
"""Return a list of characters based a list of speaker turns."""
characters = Counter()
for turn in speakerturns:
# more postprocessing required for character names (character names
# now sometimes include stage directions)
actor_string = turn['actor']
actor = extract_character_name(actor_string)
characters[actor] += 1
return characters
def extract_character_name(actor_str):
"""Returns the character name extracted from the input string."""
actor_str = actor_str.replace('(', '').replace(')', '')
actor_str = actor_str.replace('[', '').replace(']', '')
actor_str = actor_str.replace('van binnen', '')
parts = re.split('[.,]', actor_str)
return parts[0].strip()
def moving_average(a, n=3):
"""Calculate the moving average of array a and window size n."""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
def add_leading_and_trailing_zeros(a, n):
"""Return array a with n/2 leading and trailing zeros."""
zeros = [0] * (n/2)
res = np.append(zeros, a)
return np.append(res, zeros)
def r(a1, a2):
"""Calculate Jisk's r measure (not sure it makes sense."""
res = []
for i in range(len(a1)):
if not a1[i] == 0.0 or not a2[i] == 0:
res.append((a1[i]-a2[i])/(a1[i]+a2[i]))
else:
res.append(0.0)
return np.array(res)
def generate_tick_marks(speakerturns):
"""Generate tick marks for a list of speaker turns. Returns a tuple of two
lists of tick marks; one for acts and one for scenes.
"""
# TODO: also return labels for the tick marks
act_marks = []
scene_marks = []
current_act_id = ''
current_scene_id = ''
for turn in speakerturns:
scene_id = turn.parent.get('xml:id')
# The first scene of an act might not be marked as such in FoLiA (the
# parent of a speaker turn might be either an act or a scene).
if turn.parent.get('class') == 'scene':
act_id = turn.parent.parent.get('xml:id')
else:
act_id = scene_id
if not act_id == current_act_id:
act_marks.append((speakerturns.index(turn)+1))
current_act_id = act_id
if not scene_id == current_scene_id:
scene_marks.append((speakerturns.index(turn)+1))
current_scene_id = scene_id
return act_marks, scene_marks
|
|
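A brief sketch of the two pure helpers above, assuming the module imports as emotools.plays (numpy required); the sample actor string is invented to show the stripping of brackets, 'van binnen' and trailing punctuation.

from emotools.plays import extract_character_name, moving_average

print(extract_character_name("(KAREL, van binnen.)"))   # -> KAREL
print(moving_average([1, 2, 3, 4, 5], n=3))             # -> [2. 3. 4.]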
f19a982d25e44762298fd6def44b9566b4e0d6e0
|
tests/test_build_model_rpm.py
|
tests/test_build_model_rpm.py
|
#! /usr/bin/python
from build_model_rpm import BuildModelRPM
from nose.tools import *
@raises(TypeError)
def test_fail_with_no_parameters():
BuildModelRPM(None)
@raises(TypeError)
def test_fail_with_one_parameter():
BuildModelRPM("hydrotrend")
def test_hydrotrend_version_none():
BuildModelRPM("hydrotrend", None)
def test_hydrotrend_version_head():
BuildModelRPM("hydrotrend", "head")
#def test_hydrotrend_tagged_version():
# BuildModelRPM("hydrotrend", "3.0.2")
|
Include nose tests for hydrotrend
|
Include nose tests for hydrotrend
|
Python
|
mit
|
csdms/rpm_models
|
Include nose tests for hydrotrend
|
#! /usr/bin/python
from build_model_rpm import BuildModelRPM
from nose.tools import *
@raises(TypeError)
def test_fail_with_no_parameters():
BuildModelRPM(None)
@raises(TypeError)
def test_fail_with_one_parameter():
BuildModelRPM("hydrotrend")
def test_hydrotrend_version_none():
BuildModelRPM("hydrotrend", None)
def test_hydrotrend_version_head():
BuildModelRPM("hydrotrend", "head")
#def test_hydrotrend_tagged_version():
# BuildModelRPM("hydrotrend", "3.0.2")
|
<commit_before><commit_msg>Include nose tests for hydrotrend<commit_after>
|
#! /usr/bin/python
from build_model_rpm import BuildModelRPM
from nose.tools import *
@raises(TypeError)
def test_fail_with_no_parameters():
BuildModelRPM(None)
@raises(TypeError)
def test_fail_with_one_parameter():
BuildModelRPM("hydrotrend")
def test_hydrotrend_version_none():
BuildModelRPM("hydrotrend", None)
def test_hydrotrend_version_head():
BuildModelRPM("hydrotrend", "head")
#def test_hydrotrend_tagged_version():
# BuildModelRPM("hydrotrend", "3.0.2")
|
Include nose tests for hydrotrend
#! /usr/bin/python
from build_model_rpm import BuildModelRPM
from nose.tools import *
@raises(TypeError)
def test_fail_with_no_parameters():
BuildModelRPM(None)
@raises(TypeError)
def test_fail_with_one_parameter():
BuildModelRPM("hydrotrend")
def test_hydrotrend_version_none():
BuildModelRPM("hydrotrend", None)
def test_hydrotrend_version_head():
BuildModelRPM("hydrotrend", "head")
#def test_hydrotrend_tagged_version():
# BuildModelRPM("hydrotrend", "3.0.2")
|
<commit_before><commit_msg>Include nose tests for hydrotrend<commit_after>#! /usr/bin/python
from build_model_rpm import BuildModelRPM
from nose.tools import *
@raises(TypeError)
def test_fail_with_no_parameters():
BuildModelRPM(None)
@raises(TypeError)
def test_fail_with_one_parameter():
BuildModelRPM("hydrotrend")
def test_hydrotrend_version_none():
BuildModelRPM("hydrotrend", None)
def test_hydrotrend_version_head():
BuildModelRPM("hydrotrend", "head")
#def test_hydrotrend_tagged_version():
# BuildModelRPM("hydrotrend", "3.0.2")
|
|
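A sketch of how these tests would typically be collected, assuming nose is installed; the programmatic call mirrors running nosetests from the repository root.

import nose

# Equivalent to: $ nosetests tests/test_build_model_rpm.py -v
nose.run(argv=["nosetests", "tests/test_build_model_rpm.py", "-v"])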
7cea3bbcb844bd29ee14bb490217d990213620d0
|
scripts/make_changelog.py
|
scripts/make_changelog.py
|
#! /usr/bin/env python
from __future__ import print_function
import os
import sys
import subprocess
from collections import defaultdict
import jinja2
CHANGELOG = """
# Change Log
All notable changes to PyMT will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
This file was auto-generated using `scripts/make_changelog.py`.
{% for tag, sections in releases.iteritems() %}
## [{{ tag }}] {{ release_date[tag] }}
{% for section, changes in sections.iteritems() %}
### {{section}}
{% for change in changes -%}
* {{ change }}
{% endfor -%}
{% endfor -%}
{% endfor -%}
""".strip()
SECTIONS = ['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security']
def git_log(start=None, stop='HEAD'):
cmd = ['git', 'log', '--first-parent', '--merges', '--topo-order',
'--oneline']
if start:
cmd.append('{start}..{stop}'.format(start=start, stop=stop))
return subprocess.check_output(cmd).strip()
def git_tag():
return subprocess.check_output(['git', 'tag']).strip()
def git_tag_date(tag):
return subprocess.check_output(['git', 'show', tag,
'--pretty=%ci']).strip().split()[0]
def releases():
return git_tag().split(os.linesep)
def brief(start=None, stop='HEAD'):
changes = []
for change in git_log(start=start, stop=stop).split(os.linesep):
try:
changes.append(change[change.index(' ') + 1:-1])
except ValueError:
pass
return changes
def group_changes(changes):
groups = defaultdict(list)
for change in changes:
if change.startswith('Merge'):
continue
if change.startswith('Add'):
group = 'Added'
elif change.startswith('Deprecate'):
group = 'Deprecated'
elif change.startswith('Remove'):
group = 'Removed'
elif change.startswith('Fix'):
group = 'Fixed'
elif change.startswith('Security'):
group = 'Security'
else:
group = 'Changed'
groups[group].append(change)
return groups
def main():
tags = releases()
tags.append('HEAD')
changelog = defaultdict(dict)
release_date = dict()
for start, stop in zip(tags[:-1], tags[1:]):
changelog[stop] = group_changes(brief(start=start, stop=stop))
release_date[stop] = git_tag_date(stop)
env = jinja2.Environment(loader=jinja2.DictLoader({'changelog': CHANGELOG}))
print(env.get_template('changelog').render(releases=changelog,
release_date=release_date))
if __name__ == '__main__':
sys.exit(main())
|
Add script to auto-generate a Change Log.
|
Add script to auto-generate a Change Log.
|
Python
|
mit
|
csdms/pymt,csdms/coupling,csdms/coupling
|
Add script to auto-generate a Change Log.
|
#! /usr/bin/env python
from __future__ import print_function
import os
import sys
import subprocess
from collections import defaultdict
import jinja2
CHANGELOG = """
# Change Log
All notable changes to PyMT will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
This file was auto-generated using `scripts/make_changelog.py`.
{% for tag, sections in releases.iteritems() %}
## [{{ tag }}] {{ release_date[tag] }}
{% for section, changes in sections.iteritems() %}
### {{section}}
{% for change in changes -%}
* {{ change }}
{% endfor -%}
{% endfor -%}
{% endfor -%}
""".strip()
SECTIONS = ['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security']
def git_log(start=None, stop='HEAD'):
cmd = ['git', 'log', '--first-parent', '--merges', '--topo-order',
'--oneline']
if start:
cmd.append('{start}..{stop}'.format(start=start, stop=stop))
return subprocess.check_output(cmd).strip()
def git_tag():
return subprocess.check_output(['git', 'tag']).strip()
def git_tag_date(tag):
return subprocess.check_output(['git', 'show', tag,
'--pretty=%ci']).strip().split()[0]
def releases():
return git_tag().split(os.linesep)
def brief(start=None, stop='HEAD'):
changes = []
for change in git_log(start=start, stop=stop).split(os.linesep):
try:
changes.append(change[change.index(' ') + 1:-1])
except ValueError:
pass
return changes
def group_changes(changes):
groups = defaultdict(list)
for change in changes:
if change.startswith('Merge'):
continue
if change.startswith('Add'):
group = 'Added'
elif change.startswith('Deprecate'):
group = 'Deprecated'
elif change.startswith('Remove'):
group = 'Removed'
elif change.startswith('Fix'):
group = 'Fixed'
elif change.startswith('Security'):
group = 'Security'
else:
group = 'Changed'
groups[group].append(change)
return groups
def main():
tags = releases()
tags.append('HEAD')
changelog = defaultdict(dict)
release_date = dict()
for start, stop in zip(tags[:-1], tags[1:]):
changelog[stop] = group_changes(brief(start=start, stop=stop))
release_date[stop] = git_tag_date(stop)
env = jinja2.Environment(loader=jinja2.DictLoader({'changelog': CHANGELOG}))
print(env.get_template('changelog').render(releases=changelog,
release_date=release_date))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to auto-generate a Change Log.<commit_after>
|
#! /usr/bin/env python
from __future__ import print_function
import os
import sys
import subprocess
from collections import defaultdict
import jinja2
CHANGELOG = """
# Change Log
All notable changes to PyMT will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
This file was auto-generated using `scripts/make_changelog.py`.
{% for tag, sections in releases.iteritems() %}
## [{{ tag }}] {{ release_date[tag] }}
{% for section, changes in sections.iteritems() %}
### {{section}}
{% for change in changes -%}
* {{ change }}
{% endfor -%}
{% endfor -%}
{% endfor -%}
""".strip()
SECTIONS = ['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security']
def git_log(start=None, stop='HEAD'):
cmd = ['git', 'log', '--first-parent', '--merges', '--topo-order',
'--oneline']
if start:
cmd.append('{start}..{stop}'.format(start=start, stop=stop))
return subprocess.check_output(cmd).strip()
def git_tag():
return subprocess.check_output(['git', 'tag']).strip()
def git_tag_date(tag):
return subprocess.check_output(['git', 'show', tag,
'--pretty=%ci']).strip().split()[0]
def releases():
return git_tag().split(os.linesep)
def brief(start=None, stop='HEAD'):
changes = []
for change in git_log(start=start, stop=stop).split(os.linesep):
try:
changes.append(change[change.index(' ') + 1:-1])
except ValueError:
pass
return changes
def group_changes(changes):
groups = defaultdict(list)
for change in changes:
if change.startswith('Merge'):
continue
if change.startswith('Add'):
group = 'Added'
elif change.startswith('Deprecate'):
group = 'Deprecated'
elif change.startswith('Remove'):
group = 'Removed'
elif change.startswith('Fix'):
group = 'Fixed'
elif change.startswith('Security'):
group = 'Security'
else:
group = 'Changed'
groups[group].append(change)
return groups
def main():
tags = releases()
tags.append('HEAD')
changelog = defaultdict(dict)
release_date = dict()
for start, stop in zip(tags[:-1], tags[1:]):
changelog[stop] = group_changes(brief(start=start, stop=stop))
release_date[stop] = git_tag_date(stop)
env = jinja2.Environment(loader=jinja2.DictLoader({'changelog': CHANGELOG}))
print(env.get_template('changelog').render(releases=changelog,
release_date=release_date))
if __name__ == '__main__':
sys.exit(main())
|
Add script to auto-generate a Change Log.
#! /usr/bin/env python
from __future__ import print_function
import os
import sys
import subprocess
from collections import defaultdict
import jinja2
CHANGELOG = """
# Change Log
All notable changes to PyMT will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
This file was auto-generated using `scripts/make_changelog.py`.
{% for tag, sections in releases.iteritems() %}
## [{{ tag }}] {{ release_date[tag] }}
{% for section, changes in sections.iteritems() %}
### {{section}}
{% for change in changes -%}
* {{ change }}
{% endfor -%}
{% endfor -%}
{% endfor -%}
""".strip()
SECTIONS = ['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security']
def git_log(start=None, stop='HEAD'):
cmd = ['git', 'log', '--first-parent', '--merges', '--topo-order',
'--oneline']
if start:
cmd.append('{start}..{stop}'.format(start=start, stop=stop))
return subprocess.check_output(cmd).strip()
def git_tag():
return subprocess.check_output(['git', 'tag']).strip()
def git_tag_date(tag):
return subprocess.check_output(['git', 'show', tag,
'--pretty=%ci']).strip().split()[0]
def releases():
return git_tag().split(os.linesep)
def brief(start=None, stop='HEAD'):
changes = []
for change in git_log(start=start, stop=stop).split(os.linesep):
try:
changes.append(change[change.index(' ') + 1:-1])
except ValueError:
pass
return changes
def group_changes(changes):
groups = defaultdict(list)
for change in changes:
if change.startswith('Merge'):
continue
if change.startswith('Add'):
group = 'Added'
elif change.startswith('Deprecate'):
group = 'Deprecated'
elif change.startswith('Remove'):
group = 'Removed'
elif change.startswith('Fix'):
group = 'Fixed'
elif change.startswith('Security'):
group = 'Security'
else:
group = 'Changed'
groups[group].append(change)
return groups
def main():
tags = releases()
tags.append('HEAD')
changelog = defaultdict(dict)
release_date = dict()
for start, stop in zip(tags[:-1], tags[1:]):
changelog[stop] = group_changes(brief(start=start, stop=stop))
release_date[stop] = git_tag_date(stop)
env = jinja2.Environment(loader=jinja2.DictLoader({'changelog': CHANGELOG}))
print(env.get_template('changelog').render(releases=changelog,
release_date=release_date))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to auto-generate a Change Log.<commit_after>#! /usr/bin/env python
from __future__ import print_function
import os
import sys
import subprocess
from collections import defaultdict
import jinja2
CHANGELOG = """
# Change Log
All notable changes to PyMT will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
This file was auto-generated using `scripts/make_changelog.py`.
{% for tag, sections in releases.iteritems() %}
## [{{ tag }}] {{ release_date[tag] }}
{% for section, changes in sections.iteritems() %}
### {{section}}
{% for change in changes -%}
* {{ change }}
{% endfor -%}
{% endfor -%}
{% endfor -%}
""".strip()
SECTIONS = ['Added', 'Changed', 'Deprecated', 'Removed', 'Fixed', 'Security']
def git_log(start=None, stop='HEAD'):
cmd = ['git', 'log', '--first-parent', '--merges', '--topo-order',
'--oneline']
if start:
cmd.append('{start}..{stop}'.format(start=start, stop=stop))
return subprocess.check_output(cmd).strip()
def git_tag():
return subprocess.check_output(['git', 'tag']).strip()
def git_tag_date(tag):
return subprocess.check_output(['git', 'show', tag,
'--pretty=%ci']).strip().split()[0]
def releases():
return git_tag().split(os.linesep)
def brief(start=None, stop='HEAD'):
changes = []
for change in git_log(start=start, stop=stop).split(os.linesep):
try:
changes.append(change[change.index(' ') + 1:-1])
except ValueError:
pass
return changes
def group_changes(changes):
groups = defaultdict(list)
for change in changes:
if change.startswith('Merge'):
continue
if change.startswith('Add'):
group = 'Added'
elif change.startswith('Deprecate'):
group = 'Deprecated'
elif change.startswith('Remove'):
group = 'Removed'
elif change.startswith('Fix'):
group = 'Fixed'
elif change.startswith('Security'):
group = 'Security'
else:
group = 'Changed'
groups[group].append(change)
return groups
def main():
tags = releases()
tags.append('HEAD')
changelog = defaultdict(dict)
release_date = dict()
for start, stop in zip(tags[:-1], tags[1:]):
changelog[stop] = group_changes(brief(start=start, stop=stop))
release_date[stop] = git_tag_date(stop)
env = jinja2.Environment(loader=jinja2.DictLoader({'changelog': CHANGELOG}))
print(env.get_template('changelog').render(releases=changelog,
release_date=release_date))
if __name__ == '__main__':
sys.exit(main())
|
|
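A minimal sketch of the grouping step above in isolation, assuming scripts/ is on sys.path so the module imports directly; the three change strings are invented.

from make_changelog import group_changes

groups = group_changes([
    "Add CSV export",
    "Fix crash on empty input",
    "Update installation docs",
])
print(dict(groups))
# {'Added': ['Add CSV export'], 'Fixed': ['Fix crash on empty input'],
#  'Changed': ['Update installation docs']}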
086b3a92f8d9390d44d2d25cd236cfa2194a0364
|
bleh.py
|
bleh.py
|
from os import environ
class ConfigMeta(type):
def __new__(cls, class_name, parents, attrs, app_name=None):
cls._app_name = app_name.upper() if app_name else None
attrs = {a.upper(): attrs[a] for a in attrs if not a.startswith('_')}
return super().__new__(cls, class_name, parents, attrs)
def __getattribute__(cls, attr):
attr = attr.upper()
app_name = super().__getattribute__('_app_name')
env_attr = '_'.join([app_name, attr])
if app_name and env_attr in environ:
return environ[env_attr]
return super().__getattribute__(attr)
def __setattribute__(cls, attr, value):
print(attr)
super().__setattribute__(attr, value)
class BaseConfig(metaclass=ConfigMeta):
Hello = 10
class Config(BaseConfig, app_name='test'):
Hello = 12
GOODBYE = 14
print(Config.HELLO)
print(Config.GOODBYE)
print(Config.STUFF)
|
Add a little config test file (will do more later)
|
Add a little config test file (will do more later)
|
Python
|
mit
|
foxscotch/foxrollbot
|
Add a little config test file (will do more later)
|
from os import environ
class ConfigMeta(type):
def __new__(cls, class_name, parents, attrs, app_name=None):
cls._app_name = app_name.upper() if app_name else None
attrs = {a.upper(): attrs[a] for a in attrs if not a.startswith('_')}
return super().__new__(cls, class_name, parents, attrs)
def __getattribute__(cls, attr):
attr = attr.upper()
app_name = super().__getattribute__('_app_name')
env_attr = '_'.join([app_name, attr])
if app_name and env_attr in environ:
return environ[env_attr]
return super().__getattribute__(attr)
def __setattribute__(cls, attr, value):
print(attr)
super().__setattribute__(attr, value)
class BaseConfig(metaclass=ConfigMeta):
Hello = 10
class Config(BaseConfig, app_name='test'):
Hello = 12
GOODBYE = 14
print(Config.HELLO)
print(Config.GOODBYE)
print(Config.STUFF)
|
<commit_before><commit_msg>Add a little config test file (will do more later)<commit_after>
|
from os import environ
class ConfigMeta(type):
def __new__(cls, class_name, parents, attrs, app_name=None):
cls._app_name = app_name.upper() if app_name else None
attrs = {a.upper(): attrs[a] for a in attrs if not a.startswith('_')}
return super().__new__(cls, class_name, parents, attrs)
def __getattribute__(cls, attr):
attr = attr.upper()
app_name = super().__getattribute__('_app_name')
env_attr = '_'.join([app_name, attr])
if app_name and env_attr in environ:
return environ[env_attr]
return super().__getattribute__(attr)
def __setattribute__(cls, attr, value):
print(attr)
super().__setattribute__(attr, value)
class BaseConfig(metaclass=ConfigMeta):
Hello = 10
class Config(BaseConfig, app_name='test'):
Hello = 12
GOODBYE = 14
print(Config.HELLO)
print(Config.GOODBYE)
print(Config.STUFF)
|
Add a little config test file (will do more later)
from os import environ
class ConfigMeta(type):
def __new__(cls, class_name, parents, attrs, app_name=None):
cls._app_name = app_name.upper() if app_name else None
attrs = {a.upper(): attrs[a] for a in attrs if not a.startswith('_')}
return super().__new__(cls, class_name, parents, attrs)
def __getattribute__(cls, attr):
attr = attr.upper()
app_name = super().__getattribute__('_app_name')
env_attr = '_'.join([app_name, attr])
if app_name and env_attr in environ:
return environ[env_attr]
return super().__getattribute__(attr)
def __setattribute__(cls, attr, value):
print(attr)
super().__setattribute__(attr, value)
class BaseConfig(metaclass=ConfigMeta):
Hello = 10
class Config(BaseConfig, app_name='test'):
Hello = 12
GOODBYE = 14
print(Config.HELLO)
print(Config.GOODBYE)
print(Config.STUFF)
|
<commit_before><commit_msg>Add a little config test file (will do more later)<commit_after>from os import environ
class ConfigMeta(type):
def __new__(cls, class_name, parents, attrs, app_name=None):
cls._app_name = app_name.upper() if app_name else None
attrs = {a.upper(): attrs[a] for a in attrs if not a.startswith('_')}
return super().__new__(cls, class_name, parents, attrs)
def __getattribute__(cls, attr):
attr = attr.upper()
app_name = super().__getattribute__('_app_name')
env_attr = '_'.join([app_name, attr])
if app_name and env_attr in environ:
return environ[env_attr]
return super().__getattribute__(attr)
def __setattribute__(cls, attr, value):
print(attr)
super().__setattribute__(attr, value)
class BaseConfig(metaclass=ConfigMeta):
Hello = 10
class Config(BaseConfig, app_name='test'):
Hello = 12
GOODBYE = 14
print(Config.HELLO)
print(Config.GOODBYE)
print(Config.STUFF)
|
|
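A hedged sketch of the environment-override behaviour above, assuming bleh.py is importable; TEST_STUFF is exported up front because the module reads Config.STUFF at import time and would otherwise raise.

import os

os.environ["TEST_HELLO"] = "99"        # app_name 'test' becomes the TEST_ env prefix
os.environ["TEST_STUFF"] = "from-env"  # needed so the module-level print of STUFF succeeds

from bleh import Config                # the module's own three prints run here

print(Config.HELLO)    # -> 99, read from the environment instead of the class body
print(Config.GOODBYE)  # -> 14, no TEST_GOODBYE set, so the class attribute is used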
017d33a8fdcf55272613550c5360a998f201ad3d
|
services/gunicorn_conf.py
|
services/gunicorn_conf.py
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 600
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 120
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
Revert "bump gunicorn max_requests to 600"
|
Revert "bump gunicorn max_requests to 600"
This reverts commit ffbfe0d6f2ca83346693a788b14562eb332d0cbd.
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 600
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
Revert "bump gunicorn max_requests to 600"
This reverts commit ffbfe0d6f2ca83346693a788b14562eb332d0cbd.
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 120
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
<commit_before>import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 600
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
<commit_msg>Revert "bump gunicorn max_requests to 600"
This reverts commit ffbfe0d6f2ca83346693a788b14562eb332d0cbd.<commit_after>
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 120
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 600
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
Revert "bump gunicorn max_requests to 600"
This reverts commit ffbfe0d6f2ca83346693a788b14562eb332d0cbd.
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 120
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
<commit_before>import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 600
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
<commit_msg>Revert "bump gunicorn max_requests to 600"
This reverts commit ffbfe0d6f2ca83346693a788b14562eb332d0cbd.<commit_after>
import multiprocessing
preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
keepalive = 60
timeout = 900
max_requests = 120
# defaults to 30 sec, setting to 5 minutes to fight `GreenletExit`s
graceful_timeout = 5*60
# cryptically, setting forwarded_allow_ips (to the ip of the hqproxy0)
# gets gunicorn to set https on redirects when appropriate. See:
# http://docs.gunicorn.org/en/latest/configure.html#secure-scheme-headers
# http://docs.gunicorn.org/en/latest/configure.html#forwarded-allow-ips
forwarded_allow_ips = '10.176.162.109'
def post_fork(server, worker):
# hacky way to address gunicorn gevent requests hitting django too early before urls are loaded
# see: https://github.com/benoitc/gunicorn/issues/527#issuecomment-19601046
from django.core.urlresolvers import resolve
resolve('/')
|
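A hedged aside on the record above, not part of either commit: the revert drops max_requests from 600 back to 120, and recycling every worker at the same request count can make them all restart together. Recent gunicorn releases also expose max_requests_jitter to stagger those restarts. A minimal config sketch with assumed values (the jitter figure is illustrative, not something this project used):

# Sketch of a gunicorn config module; the values below are assumptions for illustration.
import multiprocessing

preload_app = True
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gevent'
max_requests = 120
# Each worker restarts after max_requests plus a random extra of up to 20 requests,
# so the whole pool does not recycle at the same moment.
max_requests_jitter = 20
graceful_timeout = 5 * 60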
44eb353b6efae0f0459b82e0ec7c171fc2ce9ba6
|
dependency-graph/unity-dump.py
|
dependency-graph/unity-dump.py
|
#!/usr/bin/env python
import argparse
import os
import re
def locate_header_file(fname, include_paths):
for p in include_paths:
fullname = p + '/' + fname
if os.path.exists(fullname):
return fullname
raise RuntimeError('File not found: %s' % fname)
def preprocess_file(fname, include_paths, already_included):
if fname in already_included:
return
already_included.add(fname)
local_include_paths = include_paths + [os.path.dirname(fname)]
try:
with open(fname, 'r') as f:
for line in f.readlines():
m = re.match(r'\s*#\s*include\s+"(.*)"', line)
if m is not None:
hname = locate_header_file(m.group(1), local_include_paths)
preprocess_file(hname, local_include_paths, already_included)
elif re.match(r'#pragma once', line):
pass
else:
print line.rstrip()
except RuntimeError as e:
raise RuntimeError(str(e) + ' in ' + fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', metavar='FILE', help='File to "preprocess" and dump to stdout')
parser.add_argument('-I', '--include-dir', action='append', default=['.'], metavar='DIR', help='Path(s) to search for local includes')
options = parser.parse_args()
options.fname = os.path.abspath(options.fname)
options.include_dir = [os.path.abspath(p) for p in options.include_dir]
preprocess_file(options.fname, options.include_dir, set())
|
Add a Python script to dump a "unity build" of any test program.
|
Add a Python script to dump a "unity build" of any test program.
This script works around the fact that I don't have a conforming C++17
compiler on my laptop (yet!). Using this script I can turn a ten-line
test program into a standalone "unity build" suitable for cutting and
pasting into Wandbox.
dependency-graph/unity-dump.py -I include test.cc > out.txt
|
Python
|
mit
|
Quuxplusone/from-scratch,Quuxplusone/from-scratch,Quuxplusone/from-scratch
|
Add a Python script to dump a "unity build" of any test program.
This script works around the fact that I don't have a conforming C++17
compiler on my laptop (yet!). Using this script I can turn a ten-line
test program into a standalone "unity build" suitable for cutting and
pasting into Wandbox.
dependency-graph/unity-dump.py -I include test.cc > out.txt
|
#!/usr/bin/env python
import argparse
import os
import re
def locate_header_file(fname, include_paths):
for p in include_paths:
fullname = p + '/' + fname
if os.path.exists(fullname):
return fullname
raise RuntimeError('File not found: %s' % fname)
def preprocess_file(fname, include_paths, already_included):
if fname in already_included:
return
already_included.add(fname)
local_include_paths = include_paths + [os.path.dirname(fname)]
try:
with open(fname, 'r') as f:
for line in f.readlines():
m = re.match(r'\s*#\s*include\s+"(.*)"', line)
if m is not None:
hname = locate_header_file(m.group(1), local_include_paths)
preprocess_file(hname, local_include_paths, already_included)
elif re.match(r'#pragma once', line):
pass
else:
print line.rstrip()
except RuntimeError as e:
raise RuntimeError(str(e) + ' in ' + fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', metavar='FILE', help='File to "preprocess" and dump to stdout')
parser.add_argument('-I', '--include-dir', action='append', default=['.'], metavar='DIR', help='Path(s) to search for local includes')
options = parser.parse_args()
options.fname = os.path.abspath(options.fname)
options.include_dir = [os.path.abspath(p) for p in options.include_dir]
preprocess_file(options.fname, options.include_dir, set())
|
<commit_before><commit_msg>Add a Python script to dump a "unity build" of any test program.
This script works around the fact that I don't have a conforming C++17
compiler on my laptop (yet!). Using this script I can turn a ten-line
test program into a standalone "unity build" suitable for cutting and
pasting into Wandbox.
dependency-graph/unity-dump.py -I include test.cc > out.txt<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import re
def locate_header_file(fname, include_paths):
for p in include_paths:
fullname = p + '/' + fname
if os.path.exists(fullname):
return fullname
raise RuntimeError('File not found: %s' % fname)
def preprocess_file(fname, include_paths, already_included):
if fname in already_included:
return
already_included.add(fname)
local_include_paths = include_paths + [os.path.dirname(fname)]
try:
with open(fname, 'r') as f:
for line in f.readlines():
m = re.match(r'\s*#\s*include\s+"(.*)"', line)
if m is not None:
hname = locate_header_file(m.group(1), local_include_paths)
preprocess_file(hname, local_include_paths, already_included)
elif re.match(r'#pragma once', line):
pass
else:
print line.rstrip()
except RuntimeError as e:
raise RuntimeError(str(e) + ' in ' + fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', metavar='FILE', help='File to "preprocess" and dump to stdout')
parser.add_argument('-I', '--include-dir', action='append', default=['.'], metavar='DIR', help='Path(s) to search for local includes')
options = parser.parse_args()
options.fname = os.path.abspath(options.fname)
options.include_dir = [os.path.abspath(p) for p in options.include_dir]
preprocess_file(options.fname, options.include_dir, set())
|
Add a Python script to dump a "unity build" of any test program.
This script works around the fact that I don't have a conforming C++17
compiler on my laptop (yet!). Using this script I can turn a ten-line
test program into a standalone "unity build" suitable for cutting and
pasting into Wandbox.
dependency-graph/unity-dump.py -I include test.cc > out.txt
#!/usr/bin/env python
import argparse
import os
import re
def locate_header_file(fname, include_paths):
for p in include_paths:
fullname = p + '/' + fname
if os.path.exists(fullname):
return fullname
raise RuntimeError('File not found: %s' % fname)
def preprocess_file(fname, include_paths, already_included):
if fname in already_included:
return
already_included.add(fname)
local_include_paths = include_paths + [os.path.dirname(fname)]
try:
with open(fname, 'r') as f:
for line in f.readlines():
m = re.match(r'\s*#\s*include\s+"(.*)"', line)
if m is not None:
hname = locate_header_file(m.group(1), local_include_paths)
preprocess_file(hname, local_include_paths, already_included)
elif re.match(r'#pragma once', line):
pass
else:
print line.rstrip()
except RuntimeError as e:
raise RuntimeError(str(e) + ' in ' + fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', metavar='FILE', help='File to "preprocess" and dump to stdout')
parser.add_argument('-I', '--include-dir', action='append', default=['.'], metavar='DIR', help='Path(s) to search for local includes')
options = parser.parse_args()
options.fname = os.path.abspath(options.fname)
options.include_dir = [os.path.abspath(p) for p in options.include_dir]
preprocess_file(options.fname, options.include_dir, set())
|
<commit_before><commit_msg>Add a Python script to dump a "unity build" of any test program.
This script works around the fact that I don't have a conforming C++17
compiler on my laptop (yet!). Using this script I can turn a ten-line
test program into a standalone "unity build" suitable for cutting and
pasting into Wandbox.
dependency-graph/unity-dump.py -I include test.cc > out.txt<commit_after>
#!/usr/bin/env python
import argparse
import os
import re
def locate_header_file(fname, include_paths):
for p in include_paths:
fullname = p + '/' + fname
if os.path.exists(fullname):
return fullname
raise RuntimeError('File not found: %s' % fname)
def preprocess_file(fname, include_paths, already_included):
if fname in already_included:
return
already_included.add(fname)
local_include_paths = include_paths + [os.path.dirname(fname)]
try:
with open(fname, 'r') as f:
for line in f.readlines():
m = re.match(r'\s*#\s*include\s+"(.*)"', line)
if m is not None:
hname = locate_header_file(m.group(1), local_include_paths)
preprocess_file(hname, local_include_paths, already_included)
elif re.match(r'#pragma once', line):
pass
else:
print line.rstrip()
except RuntimeError as e:
raise RuntimeError(str(e) + ' in ' + fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('fname', metavar='FILE', help='File to "preprocess" and dump to stdout')
parser.add_argument('-I', '--include-dir', action='append', default=['.'], metavar='DIR', help='Path(s) to search for local includes')
options = parser.parse_args()
options.fname = os.path.abspath(options.fname)
options.include_dir = [os.path.abspath(p) for p in options.include_dir]
preprocess_file(options.fname, options.include_dir, set())
|
|
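A hedged usage sketch for the unity-dump script above, equivalent to the command line quoted in its commit message. The file and directory names are placeholders; only the -I flag and the positional FILE argument come from the script's argparse definition, and since the script uses Python-2 print syntax, the interpreter named here is assumed to be Python 2.

import subprocess

# Run the dumper with one include directory and capture its stdout as a single
# "unity build" translation unit suitable for pasting into an online compiler.
with open('unity.cc', 'w') as out:
    subprocess.run(
        ['python', 'dependency-graph/unity-dump.py', '-I', 'include', 'test.cc'],
        stdout=out,
        check=True,
    )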
54bbc3ea51e1242b7553883e28e3e792b987063a
|
move_emails_to_folders.py
|
move_emails_to_folders.py
|
import logging
from csv_email import get_move_folder, type_accepts_message
from settings import get_csv_file_types, get_email_client, LOGGING_KWARGS
logger = logging.getLogger(__name__)
def get_message_folder_name(message, csv_file_types):
for csv_type in csv_file_types:
match_dict = type_accepts_message(message, csv_type)
if match_dict is None:
continue
return get_move_folder(csv_type, match_dict)
return None
def move_messages_from_inbox():
csv_file_types = get_csv_file_types()
if csv_file_types is None:
logger.error('CSV file types could not be read from `settings.yaml`.')
return False
with get_email_client() as email_client:
email_client.select_inbox()
folder_name_list = [name for name in email_client.loop_folder_names()]
for message, uid in email_client.loop_email_messages(True):
logger.debug('Message subject is %s.', message['Subject'])
folder_name = get_message_folder_name(message, csv_file_types)
logger.debug('Message folder is %s.', folder_name)
if folder_name not in folder_name_list:
continue
email_client.move_message(uid, folder_name)
return True
if '__main__' == __name__:
logging.basicConfig(**LOGGING_KWARGS)
move_messages_from_inbox()
|
Add move emails from inbox script
|
Add move emails from inbox script
|
Python
|
mit
|
AustralianAntarcticDataCentre/save_emails_to_files,AustralianAntarcticDataCentre/save_emails_to_files
|
Add move emails from inbox script
|
import logging
from csv_email import get_move_folder, type_accepts_message
from settings import get_csv_file_types, get_email_client, LOGGING_KWARGS
logger = logging.getLogger(__name__)
def get_message_folder_name(message, csv_file_types):
for csv_type in csv_file_types:
match_dict = type_accepts_message(message, csv_type)
if match_dict is None:
continue
return get_move_folder(csv_type, match_dict)
return None
def move_messages_from_inbox():
csv_file_types = get_csv_file_types()
if csv_file_types is None:
logger.error('CSV file types could not be read from `settings.yaml`.')
return False
with get_email_client() as email_client:
email_client.select_inbox()
folder_name_list = [name for name in email_client.loop_folder_names()]
for message, uid in email_client.loop_email_messages(True):
logger.debug('Message subject is %s.', message['Subject'])
folder_name = get_message_folder_name(message, csv_file_types)
logger.debug('Message folder is %s.', folder_name)
if folder_name not in folder_name_list:
continue
email_client.move_message(uid, folder_name)
return True
if '__main__' == __name__:
logging.basicConfig(**LOGGING_KWARGS)
move_messages_from_inbox()
|
<commit_before><commit_msg>Add move emails from inbox script<commit_after>
|
import logging
from csv_email import get_move_folder, type_accepts_message
from settings import get_csv_file_types, get_email_client, LOGGING_KWARGS
logger = logging.getLogger(__name__)
def get_message_folder_name(message, csv_file_types):
for csv_type in csv_file_types:
match_dict = type_accepts_message(message, csv_type)
if match_dict is None:
continue
return get_move_folder(csv_type, match_dict)
return None
def move_messages_from_inbox():
csv_file_types = get_csv_file_types()
if csv_file_types is None:
logger.error('CSV file types could not be read from `settings.yaml`.')
return False
with get_email_client() as email_client:
email_client.select_inbox()
folder_name_list = [name for name in email_client.loop_folder_names()]
for message, uid in email_client.loop_email_messages(True):
logger.debug('Message subject is %s.', message['Subject'])
folder_name = get_message_folder_name(message, csv_file_types)
logger.debug('Message folder is %s.', folder_name)
if folder_name not in folder_name_list:
continue
email_client.move_message(uid, folder_name)
return True
if '__main__' == __name__:
logging.basicConfig(**LOGGING_KWARGS)
move_messages_from_inbox()
|
Add move emails from inbox script
import logging
from csv_email import get_move_folder, type_accepts_message
from settings import get_csv_file_types, get_email_client, LOGGING_KWARGS
logger = logging.getLogger(__name__)
def get_message_folder_name(message, csv_file_types):
for csv_type in csv_file_types:
match_dict = type_accepts_message(message, csv_type)
if match_dict is None:
continue
return get_move_folder(csv_type, match_dict)
return None
def move_messages_from_inbox():
csv_file_types = get_csv_file_types()
if csv_file_types is None:
logger.error('CSV file types could not be read from `settings.yaml`.')
return False
with get_email_client() as email_client:
email_client.select_inbox()
folder_name_list = [name for name in email_client.loop_folder_names()]
for message, uid in email_client.loop_email_messages(True):
logger.debug('Message subject is %s.', message['Subject'])
folder_name = get_message_folder_name(message, csv_file_types)
logger.debug('Message folder is %s.', folder_name)
if folder_name not in folder_name_list:
continue
email_client.move_message(uid, folder_name)
return True
if '__main__' == __name__:
logging.basicConfig(**LOGGING_KWARGS)
move_messages_from_inbox()
|
<commit_before><commit_msg>Add move emails from inbox script<commit_after>
import logging
from csv_email import get_move_folder, type_accepts_message
from settings import get_csv_file_types, get_email_client, LOGGING_KWARGS
logger = logging.getLogger(__name__)
def get_message_folder_name(message, csv_file_types):
for csv_type in csv_file_types:
match_dict = type_accepts_message(message, csv_type)
if match_dict is None:
continue
return get_move_folder(csv_type, match_dict)
return None
def move_messages_from_inbox():
csv_file_types = get_csv_file_types()
if csv_file_types is None:
logger.error('CSV file types could not be read from `settings.yaml`.')
return False
with get_email_client() as email_client:
email_client.select_inbox()
folder_name_list = [name for name in email_client.loop_folder_names()]
for message, uid in email_client.loop_email_messages(True):
logger.debug('Message subject is %s.', message['Subject'])
folder_name = get_message_folder_name(message, csv_file_types)
logger.debug('Message folder is %s.', folder_name)
if folder_name not in folder_name_list:
continue
email_client.move_message(uid, folder_name)
return True
if '__main__' == __name__:
logging.basicConfig(**LOGGING_KWARGS)
move_messages_from_inbox()
|
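A hedged sketch of what a move_message(uid, folder_name) call in the record above typically amounts to over IMAP, using only the standard library. This is not the implementation behind the email_client returned by get_email_client(); the host, credentials, UID, and folder name are placeholders.

import imaplib

# IMAP has no atomic "move" before the MOVE extension, so the common pattern is:
# copy the message to the target folder, flag the original as deleted, then expunge.
conn = imaplib.IMAP4_SSL('imap.example.org')
conn.login('user@example.org', 'password')
conn.select('INBOX')

uid = '42'  # placeholder message UID
conn.uid('COPY', uid, 'Processed')
conn.uid('STORE', uid, '+FLAGS', r'(\Deleted)')
conn.expunge()
conn.logout()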