| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses: 1 value) | license (stringclasses: 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
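
Each row below is one record in this schema: a commit hash, the touched file before and after the change, the commit subject and message, the language and license, the repositories containing the commit, and the derived `prompt`/`response`/`text` columns (the tagged variants wrap the same content in `<commit_before>`/`<commit_msg>`/`<commit_after>` markers). The following is a minimal sketch of how a table with this schema could be inspected with the Hugging Face `datasets` library; the dataset identifier is a placeholder, not the real one, and a local JSONL export of the same rows would load equally well.

```python
# Minimal inspection sketch, assuming the rows are available as (or exported to)
# a Hugging Face dataset. "user/commitpack-style-python" is a hypothetical
# identifier; substitute the real dataset name or point at a local JSONL file.
from datasets import load_dataset

ds = load_dataset("user/commitpack-style-python", split="train")
# Alternative for a local export:
# ds = load_dataset("json", data_files="commits.jsonl", split="train")

for row in ds.select(range(3)):
    print(row["commit"], row["new_file"], row["subject"])
    # old_contents can be empty (newly added files); new_contents holds the
    # post-commit file body used to build the response/text columns.
    print(row["new_contents"][:200])
```

In the rows shown here, `text` is simply `message` followed by `new_contents`, and `text_tagged` wraps `old_contents`, `message`, and `new_contents` in the commit markers, so both can be reconstructed from the base columns if needed.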
fbae1c6af008b8ac48a06f98ee61a8d06bf45736
|
reduce_versions.py
|
reduce_versions.py
|
#!/usr/bin/env python
import os
import argparse
def _important_revisions(dependency_file):
old_version = None
for line in dependency_file:
# 18438 1.1.2 Biobase,multtest
(rev, version, deps) = line.strip().split('\t')
if version != old_version:
old_version = version
yield (rev, version, deps.split(','))
def get_version_at_rev(package, revision, archive_dir=None):
try:
with open(os.path.join(archive_dir, package + '_versions_full.txt'), 'r') as handle:
for line in handle:
if line.startswith(revision + '\t'):
return '\t'.join((
package,
line.strip().split('\t')[1],
'%s_%s_dependencies.txt' % (package, line.strip().split('\t')[1]),
'%s_%s.tar.gz' % (package, line.strip().split('\t')[1])
))
except Exception:
return "# Could not find %s at r%s" % (package, revision)
def main(dependency_file):
package_file = os.path.basename(dependency_file.name).replace('_versions_full.txt', '')
package_dir = os.path.dirname(dependency_file.name)
for (rev, version, deps) in _important_revisions(dependency_file):
version_deps_file = os.path.join(package_dir, '%s_%s_dependencies.txt' % (package_file, version))
versioned_deps = [get_version_at_rev(x, rev, archive_dir=package_dir)
for x in deps]
with open(version_deps_file, 'w') as output:
output.write('\n'.join(versioned_deps))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate easy to consume dependencies')
parser.add_argument('dependency_file', type=file,
help='PACKAGE_versions_full.txt file')
args = parser.parse_args()
main(**vars(args))
|
Reduce versions to easier requirements.txt style
|
Reduce versions to easier requirements.txt style
Given an `aRchive_output_path/PACKAGE_versions_full.txt` file, this
reduces it into the component
`aRchive_output_path/PACKAGE_VERSION_dependencies.txt` files, each which
look like:
Biobase 2.26.0 Biobase_2.26.0_dependencies.txt Biobase_2.26.0.tar.gz
# Could not find graphics at r95439
# Could not find grDevices at r95439
# Could not find methods at r95439
multtest 2.22.0 multtest_2.22.0_dependencies.txt multtest_2.22.0.tar.gz
# Could not find stats at r95439
# Could not find tcltk at r95439
# Could not find utils at r95439
Which has package, version, dependencies file, package url.
|
Python
|
mit
|
bioarchive/aRchive_source_code,bioarchive/aRchive_source_code,bioarchive/aRchive_source_code,bioarchive/aRchive_source_code
|
Reduce versions to easier requirements.txt style
Given an `aRchive_output_path/PACKAGE_versions_full.txt` file, this
reduces it into the component
`aRchive_output_path/PACKAGE_VERSION_dependencies.txt` files, each which
look like:
Biobase 2.26.0 Biobase_2.26.0_dependencies.txt Biobase_2.26.0.tar.gz
# Could not find graphics at r95439
# Could not find grDevices at r95439
# Could not find methods at r95439
multtest 2.22.0 multtest_2.22.0_dependencies.txt multtest_2.22.0.tar.gz
# Could not find stats at r95439
# Could not find tcltk at r95439
# Could not find utils at r95439
Which has package, version, dependencies file, package url.
|
#!/usr/bin/env python
import os
import argparse
def _important_revisions(dependency_file):
old_version = None
for line in dependency_file:
# 18438 1.1.2 Biobase,multtest
(rev, version, deps) = line.strip().split('\t')
if version != old_version:
old_version = version
yield (rev, version, deps.split(','))
def get_version_at_rev(package, revision, archive_dir=None):
try:
with open(os.path.join(archive_dir, package + '_versions_full.txt'), 'r') as handle:
for line in handle:
if line.startswith(revision + '\t'):
return '\t'.join((
package,
line.strip().split('\t')[1],
'%s_%s_dependencies.txt' % (package, line.strip().split('\t')[1]),
'%s_%s.tar.gz' % (package, line.strip().split('\t')[1])
))
except Exception:
return "# Could not find %s at r%s" % (package, revision)
def main(dependency_file):
package_file = os.path.basename(dependency_file.name).replace('_versions_full.txt', '')
package_dir = os.path.dirname(dependency_file.name)
for (rev, version, deps) in _important_revisions(dependency_file):
version_deps_file = os.path.join(package_dir, '%s_%s_dependencies.txt' % (package_file, version))
versioned_deps = [get_version_at_rev(x, rev, archive_dir=package_dir)
for x in deps]
with open(version_deps_file, 'w') as output:
output.write('\n'.join(versioned_deps))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate easy to consume dependencies')
parser.add_argument('dependency_file', type=file,
help='PACKAGE_versions_full.txt file')
args = parser.parse_args()
main(**vars(args))
|
<commit_before><commit_msg>Reduce versions to easier requirements.txt style
Given an `aRchive_output_path/PACKAGE_versions_full.txt` file, this
reduces it into the component
`aRchive_output_path/PACKAGE_VERSION_dependencies.txt` files, each which
look like:
Biobase 2.26.0 Biobase_2.26.0_dependencies.txt Biobase_2.26.0.tar.gz
# Could not find graphics at r95439
# Could not find grDevices at r95439
# Could not find methods at r95439
multtest 2.22.0 multtest_2.22.0_dependencies.txt multtest_2.22.0.tar.gz
# Could not find stats at r95439
# Could not find tcltk at r95439
# Could not find utils at r95439
Which has package, version, dependencies file, package url.<commit_after>
|
#!/usr/bin/env python
import os
import argparse
def _important_revisions(dependency_file):
old_version = None
for line in dependency_file:
# 18438 1.1.2 Biobase,multtest
(rev, version, deps) = line.strip().split('\t')
if version != old_version:
old_version = version
yield (rev, version, deps.split(','))
def get_version_at_rev(package, revision, archive_dir=None):
try:
with open(os.path.join(archive_dir, package + '_versions_full.txt'), 'r') as handle:
for line in handle:
if line.startswith(revision + '\t'):
return '\t'.join((
package,
line.strip().split('\t')[1],
'%s_%s_dependencies.txt' % (package, line.strip().split('\t')[1]),
'%s_%s.tar.gz' % (package, line.strip().split('\t')[1])
))
except Exception:
return "# Could not find %s at r%s" % (package, revision)
def main(dependency_file):
package_file = os.path.basename(dependency_file.name).replace('_versions_full.txt', '')
package_dir = os.path.dirname(dependency_file.name)
for (rev, version, deps) in _important_revisions(dependency_file):
version_deps_file = os.path.join(package_dir, '%s_%s_dependencies.txt' % (package_file, version))
versioned_deps = [get_version_at_rev(x, rev, archive_dir=package_dir)
for x in deps]
with open(version_deps_file, 'w') as output:
output.write('\n'.join(versioned_deps))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate easy to consume dependencies')
parser.add_argument('dependency_file', type=file,
help='PACKAGE_versions_full.txt file')
args = parser.parse_args()
main(**vars(args))
|
Reduce versions to easier requirements.txt style
Given an `aRchive_output_path/PACKAGE_versions_full.txt` file, this
reduces it into the component
`aRchive_output_path/PACKAGE_VERSION_dependencies.txt` files, each which
look like:
Biobase 2.26.0 Biobase_2.26.0_dependencies.txt Biobase_2.26.0.tar.gz
# Could not find graphics at r95439
# Could not find grDevices at r95439
# Could not find methods at r95439
multtest 2.22.0 multtest_2.22.0_dependencies.txt multtest_2.22.0.tar.gz
# Could not find stats at r95439
# Could not find tcltk at r95439
# Could not find utils at r95439
Which has package, version, dependencies file, package url.#!/usr/bin/env python
import os
import argparse
def _important_revisions(dependency_file):
old_version = None
for line in dependency_file:
# 18438 1.1.2 Biobase,multtest
(rev, version, deps) = line.strip().split('\t')
if version != old_version:
old_version = version
yield (rev, version, deps.split(','))
def get_version_at_rev(package, revision, archive_dir=None):
try:
with open(os.path.join(archive_dir, package + '_versions_full.txt'), 'r') as handle:
for line in handle:
if line.startswith(revision + '\t'):
return '\t'.join((
package,
line.strip().split('\t')[1],
'%s_%s_dependencies.txt' % (package, line.strip().split('\t')[1]),
'%s_%s.tar.gz' % (package, line.strip().split('\t')[1])
))
except Exception:
return "# Could not find %s at r%s" % (package, revision)
def main(dependency_file):
package_file = os.path.basename(dependency_file.name).replace('_versions_full.txt', '')
package_dir = os.path.dirname(dependency_file.name)
for (rev, version, deps) in _important_revisions(dependency_file):
version_deps_file = os.path.join(package_dir, '%s_%s_dependencies.txt' % (package_file, version))
versioned_deps = [get_version_at_rev(x, rev, archive_dir=package_dir)
for x in deps]
with open(version_deps_file, 'w') as output:
output.write('\n'.join(versioned_deps))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate easy to consume dependencies')
parser.add_argument('dependency_file', type=file,
help='PACKAGE_versions_full.txt file')
args = parser.parse_args()
main(**vars(args))
|
<commit_before><commit_msg>Reduce versions to easier requirements.txt style
Given an `aRchive_output_path/PACKAGE_versions_full.txt` file, this
reduces it into the component
`aRchive_output_path/PACKAGE_VERSION_dependencies.txt` files, each which
look like:
Biobase 2.26.0 Biobase_2.26.0_dependencies.txt Biobase_2.26.0.tar.gz
# Could not find graphics at r95439
# Could not find grDevices at r95439
# Could not find methods at r95439
multtest 2.22.0 multtest_2.22.0_dependencies.txt multtest_2.22.0.tar.gz
# Could not find stats at r95439
# Could not find tcltk at r95439
# Could not find utils at r95439
Which has package, version, dependencies file, package url.<commit_after>#!/usr/bin/env python
import os
import argparse
def _important_revisions(dependency_file):
old_version = None
for line in dependency_file:
# 18438 1.1.2 Biobase,multtest
(rev, version, deps) = line.strip().split('\t')
if version != old_version:
old_version = version
yield (rev, version, deps.split(','))
def get_version_at_rev(package, revision, archive_dir=None):
try:
with open(os.path.join(archive_dir, package + '_versions_full.txt'), 'r') as handle:
for line in handle:
if line.startswith(revision + '\t'):
return '\t'.join((
package,
line.strip().split('\t')[1],
'%s_%s_dependencies.txt' % (package, line.strip().split('\t')[1]),
'%s_%s.tar.gz' % (package, line.strip().split('\t')[1])
))
except Exception:
return "# Could not find %s at r%s" % (package, revision)
def main(dependency_file):
package_file = os.path.basename(dependency_file.name).replace('_versions_full.txt', '')
package_dir = os.path.dirname(dependency_file.name)
for (rev, version, deps) in _important_revisions(dependency_file):
version_deps_file = os.path.join(package_dir, '%s_%s_dependencies.txt' % (package_file, version))
versioned_deps = [get_version_at_rev(x, rev, archive_dir=package_dir)
for x in deps]
with open(version_deps_file, 'w') as output:
output.write('\n'.join(versioned_deps))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate easy to consume dependencies')
parser.add_argument('dependency_file', type=file,
help='PACKAGE_versions_full.txt file')
args = parser.parse_args()
main(**vars(args))
|
|
2c73a41ab78b41da7b6f2ccbd16140fa701d74f2
|
gunicorn/app/wsgiapp.py
|
gunicorn/app/wsgiapp.py
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
try:
self.load()
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
def load(self):
return util.import_app(self.app_uri)
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
def load(self):
try:
return util.import_app(self.app_uri)
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
|
Load wsgi apps after reading the configuration.
|
Load wsgi apps after reading the configuration.
|
Python
|
mit
|
WSDC-NITWarangal/gunicorn,wong2/gunicorn,ccl0326/gunicorn,ephes/gunicorn,tempbottle/gunicorn,zhoucen/gunicorn,prezi/gunicorn,urbaniak/gunicorn,wong2/gunicorn,jamesblunt/gunicorn,gtrdotmcs/gunicorn,alex/gunicorn,keakon/gunicorn,jamesblunt/gunicorn,jamesblunt/gunicorn,gtrdotmcs/gunicorn,1stvamp/gunicorn,elelianghh/gunicorn,ccl0326/gunicorn,ammaraskar/gunicorn,gtrdotmcs/gunicorn,urbaniak/gunicorn,malept/gunicorn,pschanely/gunicorn,alex/gunicorn,ccl0326/gunicorn,prezi/gunicorn,GitHublong/gunicorn,zhoucen/gunicorn,prezi/gunicorn,wong2/gunicorn,pschanely/gunicorn,tejasmanohar/gunicorn,malept/gunicorn,pschanely/gunicorn,mvaled/gunicorn,alex/gunicorn,1stvamp/gunicorn,mvaled/gunicorn,MrKiven/gunicorn,urbaniak/gunicorn,harrisonfeng/gunicorn,zhoucen/gunicorn,beni55/gunicorn,z-fork/gunicorn,1stvamp/gunicorn,mvaled/gunicorn,malept/gunicorn
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
try:
self.load()
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
def load(self):
return util.import_app(self.app_uri)Load wsgi apps after reading the configuration.
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
def load(self):
try:
return util.import_app(self.app_uri)
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
|
<commit_before># -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
try:
self.load()
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
def load(self):
return util.import_app(self.app_uri)<commit_msg>Load wsgi apps after reading the configuration.<commit_after>
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
def load(self):
try:
return util.import_app(self.app_uri)
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
try:
self.load()
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
def load(self):
return util.import_app(self.app_uri)Load wsgi apps after reading the configuration.# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
def load(self):
try:
return util.import_app(self.app_uri)
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
|
<commit_before># -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
try:
self.load()
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
def load(self):
return util.import_app(self.app_uri)<commit_msg>Load wsgi apps after reading the configuration.<commit_after># -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.app.base import Application
class WSGIApplication(Application):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
sys.path.insert(0, os.getcwd())
def load(self):
try:
return util.import_app(self.app_uri)
except:
print "Failed to import application: %s" % self.app_uri
traceback.print_exc()
sys.exit(1)
|
d44f3ca6cecab959aeefe20b32df7e47c84f3828
|
molo/core/tests/test_commands.py
|
molo/core/tests/test_commands.py
|
from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.management.commands.move_page_links_to_recomended_articles import convert_articles # noqa
body = [
{
"type": "paragraph",
"value": "paragraph 1"
},
{
"type": "page",
"value": 48
},
{
"type": "paragraph",
"value": "paragraph 1"
},
]
class TestCommands(MoloTestCaseMixin, TestCase):
'''
Test Cases:
- page links to RA
- end page links only
- existing Recommended Articles
'''
def setUp(self):
self.mk_main()
def test_convert_articles(self):
convert_articles()
|
Create test scaffold for command functions
|
Create test scaffold for command functions
|
Python
|
bsd-2-clause
|
praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo
|
Create test scaffold for command functions
|
from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.management.commands.move_page_links_to_recomended_articles import convert_articles # noqa
body = [
{
"type": "paragraph",
"value": "paragraph 1"
},
{
"type": "page",
"value": 48
},
{
"type": "paragraph",
"value": "paragraph 1"
},
]
class TestCommands(MoloTestCaseMixin, TestCase):
'''
Test Cases:
- page links to RA
- end page links only
- existing Recommended Articles
'''
def setUp(self):
self.mk_main()
def test_convert_articles(self):
convert_articles()
|
<commit_before><commit_msg>Create test scaffold for command functions<commit_after>
|
from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.management.commands.move_page_links_to_recomended_articles import convert_articles # noqa
body = [
{
"type": "paragraph",
"value": "paragraph 1"
},
{
"type": "page",
"value": 48
},
{
"type": "paragraph",
"value": "paragraph 1"
},
]
class TestCommands(MoloTestCaseMixin, TestCase):
'''
Test Cases:
- page links to RA
- end page links only
- existing Recommended Articles
'''
def setUp(self):
self.mk_main()
def test_convert_articles(self):
convert_articles()
|
Create test scaffold for command functionsfrom django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.management.commands.move_page_links_to_recomended_articles import convert_articles # noqa
body = [
{
"type": "paragraph",
"value": "paragraph 1"
},
{
"type": "page",
"value": 48
},
{
"type": "paragraph",
"value": "paragraph 1"
},
]
class TestCommands(MoloTestCaseMixin, TestCase):
'''
Test Cases:
- page links to RA
- end page links only
- existing Recommended Articles
'''
def setUp(self):
self.mk_main()
def test_convert_articles(self):
convert_articles()
|
<commit_before><commit_msg>Create test scaffold for command functions<commit_after>from django.test import TestCase
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.management.commands.move_page_links_to_recomended_articles import convert_articles # noqa
body = [
{
"type": "paragraph",
"value": "paragraph 1"
},
{
"type": "page",
"value": 48
},
{
"type": "paragraph",
"value": "paragraph 1"
},
]
class TestCommands(MoloTestCaseMixin, TestCase):
'''
Test Cases:
- page links to RA
- end page links only
- existing Recommended Articles
'''
def setUp(self):
self.mk_main()
def test_convert_articles(self):
convert_articles()
|
|
a5d07496162d40e23c966f1cb3f340845f638c07
|
one_offs/pdf_repair/fix_pdfs.py
|
one_offs/pdf_repair/fix_pdfs.py
|
GEVENT = False
from regs_models import Doc
import json
import itertools
def split_seq(iterable, size):
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
all_ids = json.load(open("/tmp/problems.json"))
for ids in split_seq(all_ids, 1000):
for doc in Doc.objects(id__in=ids):
for view in doc.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
for attachment in doc.attachments:
for view in attachment.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
doc.in_search_index = False
doc.in_cluster_db = False
doc.entities_last_extracted = None
print "Repaired %s" % doc.id
doc.save()
|
Reset script for incorrectly-extracted PDFs.
|
Reset script for incorrectly-extracted PDFs.
|
Python
|
bsd-3-clause
|
sunlightlabs/regulations-scraper,sunlightlabs/regulations-scraper,sunlightlabs/regulations-scraper
|
Reset script for incorrectly-extracted PDFs.
|
GEVENT = False
from regs_models import Doc
import json
import itertools
def split_seq(iterable, size):
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
all_ids = json.load(open("/tmp/problems.json"))
for ids in split_seq(all_ids, 1000):
for doc in Doc.objects(id__in=ids):
for view in doc.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
for attachment in doc.attachments:
for view in attachment.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
doc.in_search_index = False
doc.in_cluster_db = False
doc.entities_last_extracted = None
print "Repaired %s" % doc.id
doc.save()
|
<commit_before><commit_msg>Reset script for incorrectly-extracted PDFs.<commit_after>
|
GEVENT = False
from regs_models import Doc
import json
import itertools
def split_seq(iterable, size):
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
all_ids = json.load(open("/tmp/problems.json"))
for ids in split_seq(all_ids, 1000):
for doc in Doc.objects(id__in=ids):
for view in doc.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
for attachment in doc.attachments:
for view in attachment.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
doc.in_search_index = False
doc.in_cluster_db = False
doc.entities_last_extracted = None
print "Repaired %s" % doc.id
doc.save()
|
Reset script for incorrectly-extracted PDFs.GEVENT = False
from regs_models import Doc
import json
import itertools
def split_seq(iterable, size):
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
all_ids = json.load(open("/tmp/problems.json"))
for ids in split_seq(all_ids, 1000):
for doc in Doc.objects(id__in=ids):
for view in doc.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
for attachment in doc.attachments:
for view in attachment.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
doc.in_search_index = False
doc.in_cluster_db = False
doc.entities_last_extracted = None
print "Repaired %s" % doc.id
doc.save()
|
<commit_before><commit_msg>Reset script for incorrectly-extracted PDFs.<commit_after>GEVENT = False
from regs_models import Doc
import json
import itertools
def split_seq(iterable, size):
it = iter(iterable)
item = list(itertools.islice(it, size))
while item:
yield item
item = list(itertools.islice(it, size))
all_ids = json.load(open("/tmp/problems.json"))
for ids in split_seq(all_ids, 1000):
for doc in Doc.objects(id__in=ids):
for view in doc.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
for attachment in doc.attachments:
for view in attachment.views:
if view.type == "pdf" and view.mode == "html" and view.extracted == "yes":
view.extracted = "no"
view.content.delete()
doc.in_search_index = False
doc.in_cluster_db = False
doc.entities_last_extracted = None
print "Repaired %s" % doc.id
doc.save()
|
|
c269f2e7a161ee0fdcafed2beb0883cabe1a960f
|
scripts/fix_system_tags.py
|
scripts/fix_system_tags.py
|
# -*- coding: utf-8 -*-
"""Add system tags that weren't added during the Toku->Postgres migration.
Pass a path to a JSON file that has node IDs as keys and lists of system tag names
as values.
"""
import sys
import logging
import json
from website.app import setup_django
setup_django()
from osf.models import AbstractNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main(dry=True):
systagfile = sys.argv[1]
with open(systagfile, 'r') as fp:
systag_data = json.load(fp)
for node_id, systags in systag_data.iteritems():
node = AbstractNode.load(node_id)
for systag in systags:
logger.info('Adding {} as a system tag to AbstractNode {}'.format(systag, node._id))
if not dry:
node.add_system_tag(systag, save=True)
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Add script to add missing system tags
|
Add script to add missing system tags
|
Python
|
apache-2.0
|
CenterForOpenScience/osf.io,caneruguz/osf.io,pattisdr/osf.io,adlius/osf.io,aaxelb/osf.io,laurenrevere/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,felliott/osf.io,chrisseto/osf.io,adlius/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,mfraezz/osf.io,mfraezz/osf.io,erinspace/osf.io,caseyrollins/osf.io,mattclark/osf.io,Johnetordoff/osf.io,adlius/osf.io,cslzchen/osf.io,chrisseto/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,felliott/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,leb2dg/osf.io,erinspace/osf.io,cwisecarver/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,saradbowman/osf.io,felliott/osf.io,aaxelb/osf.io,sloria/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,chrisseto/osf.io,laurenrevere/osf.io,binoculars/osf.io,icereval/osf.io,icereval/osf.io,sloria/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,crcresearch/osf.io,cslzchen/osf.io,aaxelb/osf.io,mattclark/osf.io,erinspace/osf.io,cwisecarver/osf.io,sloria/osf.io,mattclark/osf.io,felliott/osf.io,chennan47/osf.io,leb2dg/osf.io,chrisseto/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,chennan47/osf.io,cwisecarver/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,aaxelb/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,cslzchen/osf.io,binoculars/osf.io,baylee-d/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,TomBaxter/osf.io,pattisdr/osf.io,pattisdr/osf.io,cwisecarver/osf.io,chennan47/osf.io,adlius/osf.io
|
Add script to add missing system tags
|
# -*- coding: utf-8 -*-
"""Add system tags that weren't added during the Toku->Postgres migration.
Pass a path to a JSON file that has node IDs as keys and lists of system tag names
as values.
"""
import sys
import logging
import json
from website.app import setup_django
setup_django()
from osf.models import AbstractNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main(dry=True):
systagfile = sys.argv[1]
with open(systagfile, 'r') as fp:
systag_data = json.load(fp)
for node_id, systags in systag_data.iteritems():
node = AbstractNode.load(node_id)
for systag in systags:
logger.info('Adding {} as a system tag to AbstractNode {}'.format(systag, node._id))
if not dry:
node.add_system_tag(systag, save=True)
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Add script to add missing system tags<commit_after>
|
# -*- coding: utf-8 -*-
"""Add system tags that weren't added during the Toku->Postgres migration.
Pass a path to a JSON file that has node IDs as keys and lists of system tag names
as values.
"""
import sys
import logging
import json
from website.app import setup_django
setup_django()
from osf.models import AbstractNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main(dry=True):
systagfile = sys.argv[1]
with open(systagfile, 'r') as fp:
systag_data = json.load(fp)
for node_id, systags in systag_data.iteritems():
node = AbstractNode.load(node_id)
for systag in systags:
logger.info('Adding {} as a system tag to AbstractNode {}'.format(systag, node._id))
if not dry:
node.add_system_tag(systag, save=True)
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
Add script to add missing system tags# -*- coding: utf-8 -*-
"""Add system tags that weren't added during the Toku->Postgres migration.
Pass a path to a JSON file that has node IDs as keys and lists of system tag names
as values.
"""
import sys
import logging
import json
from website.app import setup_django
setup_django()
from osf.models import AbstractNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main(dry=True):
systagfile = sys.argv[1]
with open(systagfile, 'r') as fp:
systag_data = json.load(fp)
for node_id, systags in systag_data.iteritems():
node = AbstractNode.load(node_id)
for systag in systags:
logger.info('Adding {} as a system tag to AbstractNode {}'.format(systag, node._id))
if not dry:
node.add_system_tag(systag, save=True)
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
<commit_before><commit_msg>Add script to add missing system tags<commit_after># -*- coding: utf-8 -*-
"""Add system tags that weren't added during the Toku->Postgres migration.
Pass a path to a JSON file that has node IDs as keys and lists of system tag names
as values.
"""
import sys
import logging
import json
from website.app import setup_django
setup_django()
from osf.models import AbstractNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main(dry=True):
systagfile = sys.argv[1]
with open(systagfile, 'r') as fp:
systag_data = json.load(fp)
for node_id, systags in systag_data.iteritems():
node = AbstractNode.load(node_id)
for systag in systags:
logger.info('Adding {} as a system tag to AbstractNode {}'.format(systag, node._id))
if not dry:
node.add_system_tag(systag, save=True)
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
|
d94398d6ce3c6e40735ae3b3b82a3d31a5718d78
|
analytics/app.py
|
analytics/app.py
|
from flask import Flask
from flask.ext.pymongo import PyMongo
from bson.json_util import dumps
app = Flask('test')
mongo = PyMongo(app)
@app.route('/')
def home_page():
test = mongo.db.events.find({})
view_this_suka = dumps(test)
return str(view_this_suka)
|
Set up with mongo: working
|
Set up with mongo: working
|
Python
|
mit
|
liakhandrii/genetic_cars,liakhandrii/genetic_cars
|
Set up with mongo: working
|
from flask import Flask
from flask.ext.pymongo import PyMongo
from bson.json_util import dumps
app = Flask('test')
mongo = PyMongo(app)
@app.route('/')
def home_page():
test = mongo.db.events.find({})
view_this_suka = dumps(test)
return str(view_this_suka)
|
<commit_before><commit_msg>Set up with mongo: working<commit_after>
|
from flask import Flask
from flask.ext.pymongo import PyMongo
from bson.json_util import dumps
app = Flask('test')
mongo = PyMongo(app)
@app.route('/')
def home_page():
test = mongo.db.events.find({})
view_this_suka = dumps(test)
return str(view_this_suka)
|
Set up with mongo: workingfrom flask import Flask
from flask.ext.pymongo import PyMongo
from bson.json_util import dumps
app = Flask('test')
mongo = PyMongo(app)
@app.route('/')
def home_page():
test = mongo.db.events.find({})
view_this_suka = dumps(test)
return str(view_this_suka)
|
<commit_before><commit_msg>Set up with mongo: working<commit_after>from flask import Flask
from flask.ext.pymongo import PyMongo
from bson.json_util import dumps
app = Flask('test')
mongo = PyMongo(app)
@app.route('/')
def home_page():
test = mongo.db.events.find({})
view_this_suka = dumps(test)
return str(view_this_suka)
|
|
95548af426e823687e03fdb3abcd5fb714c96cbe
|
watcher/db/sqlalchemy/alembic/versions/a86240e89a29_.py
|
watcher/db/sqlalchemy/alembic/versions/a86240e89a29_.py
|
"""Set name for Audit as part of backward compatibility
Revision ID: a86240e89a29
Revises: 3cfc94cecf4e
Create Date: 2017-12-21 13:00:09.278587
"""
# revision identifiers, used by Alembic.
revision = 'a86240e89a29'
down_revision = '3cfc94cecf4e'
from alembic import op
from sqlalchemy.orm import sessionmaker
from watcher.db.sqlalchemy import models
def upgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is None).all():
strategy_name = s.query(models.Strategy).filter_by(id=audit.strategy_id).one().name
audit.update({'name': strategy_name + '-' + str(audit.created_at)})
s.commit()
def downgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is not None).all():
audit.update({'name': None})
s.commit()
|
Fix watcher audit list command
|
Fix watcher audit list command
This patch set adds data migration version that fills noname audits
with name like strategy.name + '-' + audit.created_at.
Closes-Bug: #1738758
Change-Id: I1d65b3110166e9f64ce5b80a34672d24d629807d
|
Python
|
apache-2.0
|
openstack/watcher,stackforge/watcher,stackforge/watcher,openstack/watcher
|
Fix watcher audit list command
This patch set adds data migration version that fills noname audits
with name like strategy.name + '-' + audit.created_at.
Closes-Bug: #1738758
Change-Id: I1d65b3110166e9f64ce5b80a34672d24d629807d
|
"""Set name for Audit as part of backward compatibility
Revision ID: a86240e89a29
Revises: 3cfc94cecf4e
Create Date: 2017-12-21 13:00:09.278587
"""
# revision identifiers, used by Alembic.
revision = 'a86240e89a29'
down_revision = '3cfc94cecf4e'
from alembic import op
from sqlalchemy.orm import sessionmaker
from watcher.db.sqlalchemy import models
def upgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is None).all():
strategy_name = s.query(models.Strategy).filter_by(id=audit.strategy_id).one().name
audit.update({'name': strategy_name + '-' + str(audit.created_at)})
s.commit()
def downgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is not None).all():
audit.update({'name': None})
s.commit()
|
<commit_before><commit_msg>Fix watcher audit list command
This patch set adds data migration version that fills noname audits
with name like strategy.name + '-' + audit.created_at.
Closes-Bug: #1738758
Change-Id: I1d65b3110166e9f64ce5b80a34672d24d629807d<commit_after>
|
"""Set name for Audit as part of backward compatibility
Revision ID: a86240e89a29
Revises: 3cfc94cecf4e
Create Date: 2017-12-21 13:00:09.278587
"""
# revision identifiers, used by Alembic.
revision = 'a86240e89a29'
down_revision = '3cfc94cecf4e'
from alembic import op
from sqlalchemy.orm import sessionmaker
from watcher.db.sqlalchemy import models
def upgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is None).all():
strategy_name = s.query(models.Strategy).filter_by(id=audit.strategy_id).one().name
audit.update({'name': strategy_name + '-' + str(audit.created_at)})
s.commit()
def downgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is not None).all():
audit.update({'name': None})
s.commit()
|
Fix watcher audit list command
This patch set adds data migration version that fills noname audits
with name like strategy.name + '-' + audit.created_at.
Closes-Bug: #1738758
Change-Id: I1d65b3110166e9f64ce5b80a34672d24d629807d"""Set name for Audit as part of backward compatibility
Revision ID: a86240e89a29
Revises: 3cfc94cecf4e
Create Date: 2017-12-21 13:00:09.278587
"""
# revision identifiers, used by Alembic.
revision = 'a86240e89a29'
down_revision = '3cfc94cecf4e'
from alembic import op
from sqlalchemy.orm import sessionmaker
from watcher.db.sqlalchemy import models
def upgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is None).all():
strategy_name = s.query(models.Strategy).filter_by(id=audit.strategy_id).one().name
audit.update({'name': strategy_name + '-' + str(audit.created_at)})
s.commit()
def downgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is not None).all():
audit.update({'name': None})
s.commit()
|
<commit_before><commit_msg>Fix watcher audit list command
This patch set adds data migration version that fills noname audits
with name like strategy.name + '-' + audit.created_at.
Closes-Bug: #1738758
Change-Id: I1d65b3110166e9f64ce5b80a34672d24d629807d<commit_after>"""Set name for Audit as part of backward compatibility
Revision ID: a86240e89a29
Revises: 3cfc94cecf4e
Create Date: 2017-12-21 13:00:09.278587
"""
# revision identifiers, used by Alembic.
revision = 'a86240e89a29'
down_revision = '3cfc94cecf4e'
from alembic import op
from sqlalchemy.orm import sessionmaker
from watcher.db.sqlalchemy import models
def upgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is None).all():
strategy_name = s.query(models.Strategy).filter_by(id=audit.strategy_id).one().name
audit.update({'name': strategy_name + '-' + str(audit.created_at)})
s.commit()
def downgrade():
connection = op.get_bind()
session = sessionmaker()
s = session(bind=connection)
for audit in s.query(models.Audit).filter(models.Audit.name is not None).all():
audit.update({'name': None})
s.commit()
|
|
b6ec51bbf0a6d7ccb17e66d78f041594f5a9030d
|
tests/basics/dict-del.py
|
tests/basics/dict-del.py
|
for i in range(100):
d = dict()
for j in range(100):
d[j] = j
del d[i]
for j in range(100):
if j not in d:
print(j, 'not in d')
|
Add test for dict delete. It fails.
|
tests: Add test for dict delete. It fails.
|
Python
|
mit
|
tralamazza/micropython,mgyenik/micropython,martinribelotta/micropython,adafruit/micropython,tuc-osg/micropython,jmarcelino/pycom-micropython,MrSurly/micropython,puuu/micropython,heisewangluo/micropython,heisewangluo/micropython,feilongfl/micropython,turbinenreiter/micropython,torwag/micropython,MrSurly/micropython,TDAbboud/micropython,lbattraw/micropython,pramasoul/micropython,infinnovation/micropython,supergis/micropython,methoxid/micropystat,jmarcelino/pycom-micropython,galenhz/micropython,cloudformdesign/micropython,micropython/micropython-esp32,toolmacher/micropython,Vogtinator/micropython,lowRISC/micropython,SHA2017-badge/micropython-esp32,emfcamp/micropython,mpalomer/micropython,pfalcon/micropython,henriknelson/micropython,chrisdearman/micropython,cnoviello/micropython,xuxiaoxin/micropython,pozetroninc/micropython,MrSurly/micropython,martinribelotta/micropython,adafruit/circuitpython,mpalomer/micropython,xyb/micropython,rubencabrera/micropython,ruffy91/micropython,neilh10/micropython,paul-xxx/micropython,hiway/micropython,swegener/micropython,MrSurly/micropython,methoxid/micropystat,orionrobots/micropython,blazewicz/micropython,mianos/micropython,ernesto-g/micropython,cnoviello/micropython,cwyark/micropython,danicampora/micropython,TDAbboud/micropython,utopiaprince/micropython,lowRISC/micropython,SHA2017-badge/micropython-esp32,ceramos/micropython,kostyll/micropython,jlillest/micropython,MrSurly/micropython-esp32,dinau/micropython,jmarcelino/pycom-micropython,skybird6672/micropython,praemdonck/micropython,xuxiaoxin/micropython,martinribelotta/micropython,alex-robbins/micropython,ChuckM/micropython,misterdanb/micropython,hosaka/micropython,kerneltask/micropython,kostyll/micropython,pfalcon/micropython,dxxb/micropython,blmorris/micropython,adafruit/micropython,torwag/micropython,feilongfl/micropython,orionrobots/micropython,dhylands/micropython,swegener/micropython,xuxiaoxin/micropython,methoxid/micropystat,mhoffma/micropython,vriera/micropython,drrk/micropython,ChuckM/micropython,stonegithubs/micropython,tuc-osg/micropython,puuu/micropython,hiway/micropython,paul-xxx/micropython,aethaniel/micropython,jimkmc/micropython,praemdonck/micropython,xyb/micropython,stonegithubs/micropython,skybird6672/micropython,matthewelse/micropython,aitjcize/micropython,pramasoul/micropython,firstval/micropython,toolmacher/micropython,neilh10/micropython,cloudformdesign/micropython,firstval/micropython,emfcamp/micropython,slzatz/micropython,MrSurly/micropython,blmorris/micropython,EcmaXp/micropython,emfcamp/micropython,noahwilliamsson/micropython,chrisdearman/micropython,adafruit/circuitpython,mgyenik/micropython,cwyark/micropython,ahotam/micropython,ahotam/micropython,jimkmc/micropython,mpalomer/micropython,mianos/micropython,blazewicz/micropython,dmazzella/micropython,trezor/micropython,EcmaXp/micropython,misterdanb/micropython,AriZuu/micropython,oopy/micropython,deshipu/micropython,warner83/micropython,blazewicz/micropython,utopiaprince/micropython,ruffy91/micropython,Vogtinator/micropython,ericsnowcurrently/micropython,hiway/micropython,alex-robbins/micropython,Vogtinator/micropython,ernesto-g/micropython,cloudformdesign/micropython,mianos/micropython,stonegithubs/micropython,skybird6672/micropython,suda/micropython,utopiaprince/micropython,PappaPeppar/micropython,heisewangluo/micropython,EcmaXp/micropython,torwag/micropython,pozetroninc/micropython,stonegithubs/micropython,aethaniel/micropython,MrSurly/micropython-esp32,redbear/micropython,alex-march/micropython,hosaka/micropython,ahotam/micropython,d
hylands/micropython,turbinenreiter/micropython,ChuckM/micropython,emfcamp/micropython,alex-robbins/micropython,noahchense/micropython,SungEun-Steve-Kim/test-mp,matthewelse/micropython,dxxb/micropython,kerneltask/micropython,SHA2017-badge/micropython-esp32,blazewicz/micropython,torwag/micropython,lbattraw/micropython,ahotam/micropython,ernesto-g/micropython,AriZuu/micropython,feilongfl/micropython,mianos/micropython,SHA2017-badge/micropython-esp32,aethaniel/micropython,trezor/micropython,cloudformdesign/micropython,praemdonck/micropython,infinnovation/micropython,supergis/micropython,vriera/micropython,skybird6672/micropython,Peetz0r/micropython-esp32,pramasoul/micropython,tuc-osg/micropython,jimkmc/micropython,galenhz/micropython,danicampora/micropython,skybird6672/micropython,galenhz/micropython,ceramos/micropython,blmorris/micropython,ruffy91/micropython,xuxiaoxin/micropython,AriZuu/micropython,noahwilliamsson/micropython,ernesto-g/micropython,ericsnowcurrently/micropython,tralamazza/micropython,tralamazza/micropython,heisewangluo/micropython,tdautc19841202/micropython,infinnovation/micropython,slzatz/micropython,redbear/micropython,tdautc19841202/micropython,misterdanb/micropython,EcmaXp/micropython,ChuckM/micropython,ganshun666/micropython,tobbad/micropython,trezor/micropython,bvernoux/micropython,kostyll/micropython,xyb/micropython,Timmenem/micropython,rubencabrera/micropython,mhoffma/micropython,danicampora/micropython,noahwilliamsson/micropython,xyb/micropython,swegener/micropython,matthewelse/micropython,matthewelse/micropython,SungEun-Steve-Kim/test-mp,KISSMonX/micropython,aethaniel/micropython,selste/micropython,MrSurly/micropython-esp32,adafruit/circuitpython,toolmacher/micropython,suda/micropython,emfcamp/micropython,EcmaXp/micropython,mgyenik/micropython,tuc-osg/micropython,AriZuu/micropython,dinau/micropython,henriknelson/micropython,bvernoux/micropython,tdautc19841202/micropython,jlillest/micropython,mianos/micropython,dmazzella/micropython,PappaPeppar/micropython,martinribelotta/micropython,cwyark/micropython,micropython/micropython-esp32,methoxid/micropystat,adafruit/micropython,swegener/micropython,pfalcon/micropython,micropython/micropython-esp32,chrisdearman/micropython,chrisdearman/micropython,KISSMonX/micropython,deshipu/micropython,deshipu/micropython,warner83/micropython,kostyll/micropython,ceramos/micropython,redbear/micropython,danicampora/micropython,ruffy91/micropython,adafruit/circuitpython,mpalomer/micropython,noahchense/micropython,tdautc19841202/micropython,pozetroninc/micropython,oopy/micropython,lowRISC/micropython,utopiaprince/micropython,feilongfl/micropython,alex-robbins/micropython,rubencabrera/micropython,slzatz/micropython,jimkmc/micropython,turbinenreiter/micropython,ruffy91/micropython,paul-xxx/micropython,galenhz/micropython,ernesto-g/micropython,alex-robbins/micropython,danicampora/micropython,suda/micropython,lowRISC/micropython,dinau/micropython,deshipu/micropython,warner83/micropython,blmorris/micropython,pramasoul/micropython,jlillest/micropython,tralamazza/micropython,alex-march/micropython,matthewelse/micropython,swegener/micropython,hosaka/micropython,infinnovation/micropython,slzatz/micropython,tobbad/micropython,redbear/micropython,oopy/micropython,SungEun-Steve-Kim/test-mp,micropython/micropython-esp32,adafruit/micropython,jmarcelino/pycom-micropython,toolmacher/micropython,noahwilliamsson/micropython,PappaPeppar/micropython,suda/micropython,HenrikSolver/micropython,tobbad/micropython,mhoffma/micropython,methoxid/micropystat,Peetz0r/micr
opython-esp32,hiway/micropython,dhylands/micropython,hosaka/micropython,warner83/micropython,tobbad/micropython,drrk/micropython,matthewelse/micropython,selste/micropython,supergis/micropython,galenhz/micropython,vriera/micropython,SHA2017-badge/micropython-esp32,deshipu/micropython,ryannathans/micropython,tobbad/micropython,omtinez/micropython,ryannathans/micropython,MrSurly/micropython-esp32,xuxiaoxin/micropython,mgyenik/micropython,pozetroninc/micropython,dxxb/micropython,HenrikSolver/micropython,cnoviello/micropython,henriknelson/micropython,firstval/micropython,KISSMonX/micropython,dhylands/micropython,cnoviello/micropython,AriZuu/micropython,ericsnowcurrently/micropython,jmarcelino/pycom-micropython,henriknelson/micropython,vitiral/micropython,ganshun666/micropython,henriknelson/micropython,ryannathans/micropython,adamkh/micropython,Vogtinator/micropython,infinnovation/micropython,xyb/micropython,jlillest/micropython,heisewangluo/micropython,noahwilliamsson/micropython,kerneltask/micropython,vitiral/micropython,hiway/micropython,pfalcon/micropython,lbattraw/micropython,neilh10/micropython,ganshun666/micropython,noahchense/micropython,omtinez/micropython,adamkh/micropython,ceramos/micropython,alex-march/micropython,blazewicz/micropython,aitjcize/micropython,praemdonck/micropython,Peetz0r/micropython-esp32,Timmenem/micropython,cwyark/micropython,drrk/micropython,noahchense/micropython,tdautc19841202/micropython,SungEun-Steve-Kim/test-mp,Peetz0r/micropython-esp32,warner83/micropython,neilh10/micropython,xhat/micropython,chrisdearman/micropython,Timmenem/micropython,dinau/micropython,adamkh/micropython,bvernoux/micropython,aitjcize/micropython,adamkh/micropython,selste/micropython,rubencabrera/micropython,torwag/micropython,cloudformdesign/micropython,lbattraw/micropython,supergis/micropython,ChuckM/micropython,selste/micropython,aitjcize/micropython,ganshun666/micropython,suda/micropython,Timmenem/micropython,HenrikSolver/micropython,MrSurly/micropython-esp32,TDAbboud/micropython,misterdanb/micropython,mgyenik/micropython,ceramos/micropython,firstval/micropython,rubencabrera/micropython,PappaPeppar/micropython,alex-march/micropython,puuu/micropython,trezor/micropython,vriera/micropython,hosaka/micropython,oopy/micropython,KISSMonX/micropython,toolmacher/micropython,omtinez/micropython,turbinenreiter/micropython,jlillest/micropython,misterdanb/micropython,adafruit/circuitpython,feilongfl/micropython,kerneltask/micropython,vitiral/micropython,lbattraw/micropython,dxxb/micropython,aethaniel/micropython,mpalomer/micropython,adafruit/circuitpython,utopiaprince/micropython,Peetz0r/micropython-esp32,puuu/micropython,dinau/micropython,vitiral/micropython,Timmenem/micropython,vitiral/micropython,orionrobots/micropython,pozetroninc/micropython,xhat/micropython,xhat/micropython,selste/micropython,xhat/micropython,adafruit/micropython,kerneltask/micropython,lowRISC/micropython,blmorris/micropython,bvernoux/micropython,SungEun-Steve-Kim/test-mp,redbear/micropython,dhylands/micropython,xhat/micropython,omtinez/micropython,ahotam/micropython,alex-march/micropython,Vogtinator/micropython,vriera/micropython,omtinez/micropython,supergis/micropython,adamkh/micropython,jimkmc/micropython,kostyll/micropython,orionrobots/micropython,dxxb/micropython,slzatz/micropython,TDAbboud/micropython,neilh10/micropython,ryannathans/micropython,paul-xxx/micropython,dmazzella/micropython,ryannathans/micropython,drrk/micropython,cwyark/micropython,pramasoul/micropython,TDAbboud/micropython,mhoffma/micropython,tuc-osg/microp
ython,mhoffma/micropython,oopy/micropython,KISSMonX/micropython,ericsnowcurrently/micropython,HenrikSolver/micropython,ganshun666/micropython,orionrobots/micropython,bvernoux/micropython,cnoviello/micropython,ericsnowcurrently/micropython,pfalcon/micropython,noahchense/micropython,paul-xxx/micropython,turbinenreiter/micropython,dmazzella/micropython,trezor/micropython,firstval/micropython,puuu/micropython,HenrikSolver/micropython,micropython/micropython-esp32,praemdonck/micropython,PappaPeppar/micropython,stonegithubs/micropython,martinribelotta/micropython,drrk/micropython
|
tests: Add test for dict delete. It fails.
|
for i in range(100):
d = dict()
for j in range(100):
d[j] = j
del d[i]
for j in range(100):
if j not in d:
print(j, 'not in d')
|
<commit_before><commit_msg>tests: Add test for dict delete. It fails.<commit_after>
|
for i in range(100):
d = dict()
for j in range(100):
d[j] = j
del d[i]
for j in range(100):
if j not in d:
print(j, 'not in d')
|
tests: Add test for dict delete. It fails.for i in range(100):
d = dict()
for j in range(100):
d[j] = j
del d[i]
for j in range(100):
if j not in d:
print(j, 'not in d')
|
<commit_before><commit_msg>tests: Add test for dict delete. It fails.<commit_after>for i in range(100):
d = dict()
for j in range(100):
d[j] = j
del d[i]
for j in range(100):
if j not in d:
print(j, 'not in d')
|
|
d4c432f8652b6884fcd02e3b532036a152f58b84
|
tests/test_exceptions.py
|
tests/test_exceptions.py
|
"""tests/test_exceptions.py.
Tests to ensure custom exceptions work and are formatted as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import hug
def test_invalid_type_data():
try:
raise hug.exceptions.InvalidTypeData('not a good type')
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons is None
try:
raise hug.exceptions.InvalidTypeData('not a good type', [1, 2, 3])
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons == [1, 2, 3]
with pytest.raises(Exception):
try:
raise hug.exceptions.InvalidTypeData()
except hug.exceptions.InvalidTypeData as exception:
pass
|
Add test to define how new exceptions module should work
|
Add test to define how new exceptions module should work
|
Python
|
mit
|
MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,timothycrosley/hug
|
Add test to define how new exceptions module should work
|
"""tests/test_exceptions.py.
Tests to ensure custom exceptions work and are formatted as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import hug
def test_invalid_type_data():
try:
raise hug.exceptions.InvalidTypeData('not a good type')
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons is None
try:
raise hug.exceptions.InvalidTypeData('not a good type', [1, 2, 3])
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons == [1, 2, 3]
with pytest.raises(Exception):
try:
raise hug.exceptions.InvalidTypeData()
except hug.exceptions.InvalidTypeData as exception:
pass
|
<commit_before><commit_msg>Add test to define how new exceptions module should work<commit_after>
|
"""tests/test_exceptions.py.
Tests to ensure custom exceptions work and are formatted as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import hug
def test_invalid_type_data():
try:
raise hug.exceptions.InvalidTypeData('not a good type')
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons is None
try:
raise hug.exceptions.InvalidTypeData('not a good type', [1, 2, 3])
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons == [1, 2, 3]
with pytest.raises(Exception):
try:
raise hug.exceptions.InvalidTypeData()
except hug.exceptions.InvalidTypeData as exception:
pass
|
Add test to define how new exceptions module should work"""tests/test_exceptions.py.
Tests to ensure custom exceptions work and are formatted as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import hug
def test_invalid_type_data():
try:
raise hug.exceptions.InvalidTypeData('not a good type')
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons is None
try:
raise hug.exceptions.InvalidTypeData('not a good type', [1, 2, 3])
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons == [1, 2, 3]
with pytest.raises(Exception):
try:
raise hug.exceptions.InvalidTypeData()
except hug.exceptions.InvalidTypeData as exception:
pass
|
<commit_before><commit_msg>Add test to define how new exceptions module should work<commit_after>"""tests/test_exceptions.py.
Tests to ensure custom exceptions work and are formatted as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import hug
def test_invalid_type_data():
try:
raise hug.exceptions.InvalidTypeData('not a good type')
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons is None
try:
raise hug.exceptions.InvalidTypeData('not a good type', [1, 2, 3])
except hug.exceptions.InvalidTypeData as exception:
error = exception
assert error.message == 'not a good type'
assert error.reasons == [1, 2, 3]
with pytest.raises(Exception):
try:
raise hug.exceptions.InvalidTypeData()
except hug.exceptions.InvalidTypeData as exception:
pass
|
|
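Aside on the exceptions record above: the test fixes the constructor contract (a required message plus an optional reasons defaulting to None) without showing the class itself. A minimal sketch that would satisfy those assertions is given below; hug's actual exceptions module may differ in naming and detail.

# Sketch only: exception class inferred from the assertions in the test above.
class InvalidTypeData(Exception):
    """Raised when data supplied for a type cannot be processed."""
    def __init__(self, message, reasons=None):
        super().__init__(message)
        self.message = message    # required positional argument
        self.reasons = reasons    # optional list of underlying failures

With this shape, calling InvalidTypeData() with no arguments raises TypeError, which satisfies the final pytest.raises(Exception) block.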
e54232b9cdd9c845f2d9386ed58cd3d4d25db701
|
auc_pr.py
|
auc_pr.py
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
import sys
import pandas
from pandas import DataFrame
from sklearn.metrics import auc, precision_recall_curve #average_precision_score
REAL_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'label']
RESULT_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'prob']
def check_format(real_df, result_df):
    '''Check the header format of real.csv and result.csv'''
real_headers = real_df.columns.values.tolist()
if REAL_HEADERS != real_headers:
print('Error: real.csv has wrong headers!')
print(real_headers)
exit(1)
result_headers = result_df.columns.values.tolist()
if RESULT_HEADERS != result_headers:
print('Error: result.csv has wrong headers!')
print(result_headers)
exit(1)
def auc_pr(real_csv, result_csv):
    '''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
real_df, result_df = pandas.read_csv(real_csv), pandas.read_csv(result_csv)
check_format(real_df, result_df)
label, prob = real_df['label'].values, result_df['prob'].values
for i in range(len(prob)):
prob[i] = round(prob[i], 4)
precision, recall, thresholds = precision_recall_curve(label, prob)
area = auc(recall, precision)
print(area)
if __name__ == "__main__":
#auc_pr(sys.argv[1], sys.argv[2])
auc_pr('real.csv', 'result.csv')
|
Add calculate AUC value of PR curve by sklearn
|
Add calculate AUC value of PR curve by sklearn
|
Python
|
mit
|
9468305/script
|
Add calculate AUC value of PR curve by sklearn
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
import sys
import pandas
from pandas import DataFrame
from sklearn.metrics import auc, precision_recall_curve #average_precision_score
REAL_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'label']
RESULT_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'prob']
def check_format(real_df, result_df):
    '''Check the header format of real.csv and result.csv'''
real_headers = real_df.columns.values.tolist()
if REAL_HEADERS != real_headers:
print('Error: real.csv has wrong headers!')
print(real_headers)
exit(1)
result_headers = result_df.columns.values.tolist()
if RESULT_HEADERS != result_headers:
print('Error: result.csv has wrong headers!')
print(result_headers)
exit(1)
def auc_pr(real_csv, result_csv):
    '''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
real_df, result_df = pandas.read_csv(real_csv), pandas.read_csv(result_csv)
check_format(real_df, result_df)
label, prob = real_df['label'].values, result_df['prob'].values
for i in range(len(prob)):
prob[i] = round(prob[i], 4)
precision, recall, thresholds = precision_recall_curve(label, prob)
area = auc(recall, precision)
print(area)
if __name__ == "__main__":
#auc_pr(sys.argv[1], sys.argv[2])
auc_pr('real.csv', 'result.csv')
|
<commit_before><commit_msg>Add calculate AUC value of PR curve by sklearn<commit_after>
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
import sys
import pandas
from pandas import DataFrame
from sklearn.metrics import auc, precision_recall_curve #average_precision_score
REAL_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'label']
RESULT_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'prob']
def check_format(real_df, result_df):
    '''Check the header format of real.csv and result.csv'''
real_headers = real_df.columns.values.tolist()
if REAL_HEADERS != real_headers:
print('Error: real.csv has wrong headers!')
print(real_headers)
exit(1)
result_headers = result_df.columns.values.tolist()
if RESULT_HEADERS != result_headers:
print('Error: result.csv has wrong headers!')
print(result_headers)
exit(1)
def auc_pr(real_csv, result_csv):
    '''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
real_df, result_df = pandas.read_csv(real_csv), pandas.read_csv(result_csv)
check_format(real_df, result_df)
label, prob = real_df['label'].values, result_df['prob'].values
for i in range(len(prob)):
prob[i] = round(prob[i], 4)
precision, recall, thresholds = precision_recall_curve(label, prob)
area = auc(recall, precision)
print(area)
if __name__ == "__main__":
#auc_pr(sys.argv[1], sys.argv[2])
auc_pr('real.csv', 'result.csv')
|
Add calculate AUC value of PR curve by sklearn#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
import sys
import pandas
from pandas import DataFrame
from sklearn.metrics import auc, precision_recall_curve #average_precision_score
REAL_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'label']
RESULT_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'prob']
def check_format(real_df, result_df):
    '''Check the header format of real.csv and result.csv'''
real_headers = real_df.columns.values.tolist()
if REAL_HEADERS != real_headers:
print('Error: real.csv has wrong headers!')
print(real_headers)
exit(1)
result_headers = result_df.columns.values.tolist()
if RESULT_HEADERS != result_headers:
print('Error: result.csv has wrong headers!')
print(result_headers)
exit(1)
def auc_pr(real_csv, result_csv):
    '''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
real_df, result_df = pandas.read_csv(real_csv), pandas.read_csv(result_csv)
check_format(real_df, result_df)
label, prob = real_df['label'].values, result_df['prob'].values
for i in range(len(prob)):
prob[i] = round(prob[i], 4)
precision, recall, thresholds = precision_recall_curve(label, prob)
area = auc(recall, precision)
print(area)
if __name__ == "__main__":
#auc_pr(sys.argv[1], sys.argv[2])
auc_pr('real.csv', 'result.csv')
|
<commit_before><commit_msg>Add calculate AUC value of PR curve by sklearn<commit_after>#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
'''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
import sys
import pandas
from pandas import DataFrame
from sklearn.metrics import auc, precision_recall_curve #average_precision_score
REAL_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'label']
RESULT_HEADERS = ['Flightno',
'FlightDepcode',
'FlightArrcode',
'PlannedDeptime',
'PlannedArrtime',
'prob']
def check_format(real_df, result_df):
    '''Check the header format of real.csv and result.csv'''
real_headers = real_df.columns.values.tolist()
if REAL_HEADERS != real_headers:
print('Error: real.csv has wrong headers!')
print(real_headers)
exit(1)
result_headers = result_df.columns.values.tolist()
if RESULT_HEADERS != result_headers:
print('Error: result.csv has wrong headers!')
print(result_headers)
exit(1)
def auc_pr(real_csv, result_csv):
    '''Compute the AUC of the PR curve from the real.csv and result.csv table data'''
real_df, result_df = pandas.read_csv(real_csv), pandas.read_csv(result_csv)
check_format(real_df, result_df)
label, prob = real_df['label'].values, result_df['prob'].values
for i in range(len(prob)):
prob[i] = round(prob[i], 4)
precision, recall, thresholds = precision_recall_curve(label, prob)
area = auc(recall, precision)
print(area)
if __name__ == "__main__":
#auc_pr(sys.argv[1], sys.argv[2])
auc_pr('real.csv', 'result.csv')
|
|
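Aside on the auc_pr record above: once the CSV handling and header checks are stripped away, the PR-AUC computation is two sklearn calls. The sketch below uses invented toy values purely for illustration.

# Sketch only: the core PR-AUC computation performed by auc_pr.py, on made-up data.
from sklearn.metrics import auc, precision_recall_curve

label = [0, 0, 1, 1]             # stand-in for the real.csv 'label' column
prob = [0.10, 0.40, 0.35, 0.80]  # stand-in for the result.csv 'prob' column

precision, recall, thresholds = precision_recall_curve(label, prob)
print(auc(recall, precision))    # area under the precision-recall curve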
70d6a5c67730e17d8d766eb1e8e6f21eeeda7c04
|
cptm/folia_party_names.py
|
cptm/folia_party_names.py
|
import gzip
from lxml import etree
import logging
import argparse
import glob
from collections import Counter
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s',
level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(gzipped FoLiA XML files)')
args = parser.parse_args()
data_files = glob.glob('{}/*/data_folia/*.xml.gz'.format(args.dir_in))
num_speech = 0
num_speech_without_party = 0
parties = Counter()
for data_file in data_files:
word_tag = '{http://ilk.uvt.nl/FoLiA}w'
pos_tag = '{http://ilk.uvt.nl/FoLiA}pos'
lemma_tag = '{http://ilk.uvt.nl/FoLiA}lemma'
speech_tag = '{http://www.politicalmashup.nl}speech'
party_tag = '{http://www.politicalmashup.nl}party'
date_tag = '{http://purl.org/dc/elements/1.1/}date'
f = gzip.open(data_file)
context = etree.iterparse(f, events=('end',), tag=(speech_tag, date_tag),
huge_tree=True)
for event, elem in context:
if elem.tag == date_tag:
pass
if elem.tag == speech_tag:
num_speech += 1
party = elem.attrib.get(party_tag)
if party:
# prevent unwanted subdirectories to be created (happens
# when there is a / in the party name)
party = party.replace('/', '-')
parties[party] += 1
else:
num_speech_without_party += 1
del context
f.close()
print 'num speech,', num_speech
print 'num speech without party,', num_speech_without_party
for p, f in parties.most_common():
print p, f
speeches_found = sum(parties.values())
print 'speeches found,', speeches_found
print 'speeches without parties + speeches_found,', num_speech_without_party+speeches_found
|
Add script to check party/speeches data
|
Add script to check party/speeches data
Added a quick and dirty script to determine additional corpus statistics,
such as
- party names
- number of speeches found
- number of speeches without party
- number of speeches per party
because it seems that the folia data is far from complete.
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to check party/speeches data
Added a quick and dirty script to determine additional corpus statistics,
such as
- party names
- number of speeches found
- number of speeches without party
- number of speeches per party
because it seems that the folia data is far from complete.
|
import gzip
from lxml import etree
import logging
import argparse
import glob
from collections import Counter
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s',
level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(gzipped FoLiA XML files)')
args = parser.parse_args()
data_files = glob.glob('{}/*/data_folia/*.xml.gz'.format(args.dir_in))
num_speech = 0
num_speech_without_party = 0
parties = Counter()
for data_file in data_files:
word_tag = '{http://ilk.uvt.nl/FoLiA}w'
pos_tag = '{http://ilk.uvt.nl/FoLiA}pos'
lemma_tag = '{http://ilk.uvt.nl/FoLiA}lemma'
speech_tag = '{http://www.politicalmashup.nl}speech'
party_tag = '{http://www.politicalmashup.nl}party'
date_tag = '{http://purl.org/dc/elements/1.1/}date'
f = gzip.open(data_file)
context = etree.iterparse(f, events=('end',), tag=(speech_tag, date_tag),
huge_tree=True)
for event, elem in context:
if elem.tag == date_tag:
pass
if elem.tag == speech_tag:
num_speech += 1
party = elem.attrib.get(party_tag)
if party:
# prevent unwanted subdirectories to be created (happens
# when there is a / in the party name)
party = party.replace('/', '-')
parties[party] += 1
else:
num_speech_without_party += 1
del context
f.close()
print 'num speech,', num_speech
print 'num speech without party,', num_speech_without_party
for p, f in parties.most_common():
print p, f
speeches_found = sum(parties.values())
print 'speeches found,', speeches_found
print 'speeches without parties + speeches_found,', num_speech_without_party+speeches_found
|
<commit_before><commit_msg>Add script to check party/speeches data
Added a quick and dirty script to determine additional corpus statistics,
such as
- party names
- number of speeches found
- number of speeches without party
- number of speeches per party
because it seems that the folia data is far from complete.<commit_after>
|
import gzip
from lxml import etree
import logging
import argparse
import glob
from collections import Counter
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s',
level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(gzipped FoLiA XML files)')
args = parser.parse_args()
data_files = glob.glob('{}/*/data_folia/*.xml.gz'.format(args.dir_in))
num_speech = 0
num_speech_without_party = 0
parties = Counter()
for data_file in data_files:
word_tag = '{http://ilk.uvt.nl/FoLiA}w'
pos_tag = '{http://ilk.uvt.nl/FoLiA}pos'
lemma_tag = '{http://ilk.uvt.nl/FoLiA}lemma'
speech_tag = '{http://www.politicalmashup.nl}speech'
party_tag = '{http://www.politicalmashup.nl}party'
date_tag = '{http://purl.org/dc/elements/1.1/}date'
f = gzip.open(data_file)
context = etree.iterparse(f, events=('end',), tag=(speech_tag, date_tag),
huge_tree=True)
for event, elem in context:
if elem.tag == date_tag:
pass
if elem.tag == speech_tag:
num_speech += 1
party = elem.attrib.get(party_tag)
if party:
# prevent unwanted subdirectories to be created (happens
# when there is a / in the party name)
party = party.replace('/', '-')
parties[party] += 1
else:
num_speech_without_party += 1
del context
f.close()
print 'num speech,', num_speech
print 'num speech without party,', num_speech_without_party
for p, f in parties.most_common():
print p, f
speeches_found = sum(parties.values())
print 'speeches found,', speeches_found
print 'speeches without parties + speeches_found,', num_speech_without_party+speeches_found
|
Add script to check party/speeches data
Added a quick and dirty script to determine additional corpus statistics,
such as
- party names
- number of speeches found
- number of speeches without party
- number of speeches per party
because it seems that the folia data is far from complete.import gzip
from lxml import etree
import logging
import argparse
import glob
from collections import Counter
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s',
level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(gzipped FoLiA XML files)')
args = parser.parse_args()
data_files = glob.glob('{}/*/data_folia/*.xml.gz'.format(args.dir_in))
num_speech = 0
num_speech_without_party = 0
parties = Counter()
for data_file in data_files:
word_tag = '{http://ilk.uvt.nl/FoLiA}w'
pos_tag = '{http://ilk.uvt.nl/FoLiA}pos'
lemma_tag = '{http://ilk.uvt.nl/FoLiA}lemma'
speech_tag = '{http://www.politicalmashup.nl}speech'
party_tag = '{http://www.politicalmashup.nl}party'
date_tag = '{http://purl.org/dc/elements/1.1/}date'
f = gzip.open(data_file)
context = etree.iterparse(f, events=('end',), tag=(speech_tag, date_tag),
huge_tree=True)
for event, elem in context:
if elem.tag == date_tag:
pass
if elem.tag == speech_tag:
num_speech += 1
party = elem.attrib.get(party_tag)
if party:
# prevent unwanted subdirectories to be created (happens
# when there is a / in the party name)
party = party.replace('/', '-')
parties[party] += 1
else:
num_speech_without_party += 1
del context
f.close()
print 'num speech,', num_speech
print 'num speech without party,', num_speech_without_party
for p, f in parties.most_common():
print p, f
speeches_found = sum(parties.values())
print 'speeches found,', speeches_found
print 'speeches without parties + speeches_found,', num_speech_without_party+speeches_found
|
<commit_before><commit_msg>Add script to check party/speeches data
Added a quick and dirty script to determine additional corpus statistics,
such as
- party names
- number of speeches found
- number of speeches without party
- number of speeches per party
because it seems that the folia data is far from complete.<commit_after>import gzip
from lxml import etree
import logging
import argparse
import glob
from collections import Counter
if __name__ == '__main__':
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s',
level=logging.INFO)
logger.setLevel(logging.DEBUG)
logging.getLogger('inputgeneration').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('dir_in', help='directory containing the data '
'(gzipped FoLiA XML files)')
args = parser.parse_args()
data_files = glob.glob('{}/*/data_folia/*.xml.gz'.format(args.dir_in))
num_speech = 0
num_speech_without_party = 0
parties = Counter()
for data_file in data_files:
word_tag = '{http://ilk.uvt.nl/FoLiA}w'
pos_tag = '{http://ilk.uvt.nl/FoLiA}pos'
lemma_tag = '{http://ilk.uvt.nl/FoLiA}lemma'
speech_tag = '{http://www.politicalmashup.nl}speech'
party_tag = '{http://www.politicalmashup.nl}party'
date_tag = '{http://purl.org/dc/elements/1.1/}date'
f = gzip.open(data_file)
context = etree.iterparse(f, events=('end',), tag=(speech_tag, date_tag),
huge_tree=True)
for event, elem in context:
if elem.tag == date_tag:
pass
if elem.tag == speech_tag:
num_speech += 1
party = elem.attrib.get(party_tag)
if party:
# prevent unwanted subdirectories to be created (happens
# when there is a / in the party name)
party = party.replace('/', '-')
parties[party] += 1
else:
num_speech_without_party += 1
del context
f.close()
print 'num speech,', num_speech
print 'num speech without party,', num_speech_without_party
for p, f in parties.most_common():
print p, f
speeches_found = sum(parties.values())
print 'speeches found,', speeches_found
print 'speeches without parties + speeches_found,', num_speech_without_party+speeches_found
|
|
a9efa21d153faea171c7e916de91ee787e877a69
|
pyfibot/util/find_config_opts.py
|
pyfibot/util/find_config_opts.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Small script to find variables that can be declared in config...
import os
import re
from pprint import pprint
get_rg = re.compile(r'[^\.](config|settings|network_conf)\.get\((.*?),(.*?)\)')
def clean_string(string):
return string.strip().strip('\'"')
def find_gets(path):
config_options = {}
for f in os.listdir(path):
if not f.endswith('.py'):
continue
with open(os.path.join(path, f), 'r') as f_handle:
lines = f_handle.readlines()
for l in lines:
m = get_rg.search(l)
if m:
if f not in config_options:
config_options[f] = {}
config_options[f][clean_string(m.group(2))] = clean_string(m.group(3))
pprint(config_options)
if __name__ == '__main__':
for p in ['pyfibot', 'pyfibot/modules', 'pyfibot/modules/available']:
find_gets(os.path.join('.', p))
|
Add util to find config options
|
Add util to find config options
|
Python
|
bsd-3-clause
|
aapa/pyfibot,huqa/pyfibot,lepinkainen/pyfibot,lepinkainen/pyfibot,EArmour/pyfibot,EArmour/pyfibot,aapa/pyfibot,huqa/pyfibot
|
Add util to find config options
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Small script to find variables that can be declared in config...
import os
import re
from pprint import pprint
get_rg = re.compile(r'[^\.](config|settings|network_conf)\.get\((.*?),(.*?)\)')
def clean_string(string):
return string.strip().strip('\'"')
def find_gets(path):
config_options = {}
for f in os.listdir(path):
if not f.endswith('.py'):
continue
with open(os.path.join(path, f), 'r') as f_handle:
lines = f_handle.readlines()
for l in lines:
m = get_rg.search(l)
if m:
if f not in config_options:
config_options[f] = {}
config_options[f][clean_string(m.group(2))] = clean_string(m.group(3))
pprint(config_options)
if __name__ == '__main__':
for p in ['pyfibot', 'pyfibot/modules', 'pyfibot/modules/available']:
find_gets(os.path.join('.', p))
|
<commit_before><commit_msg>Add util to find config options<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Small script to find variables that can be declared in config...
import os
import re
from pprint import pprint
get_rg = re.compile(r'[^\.](config|settings|network_conf)\.get\((.*?),(.*?)\)')
def clean_string(string):
return string.strip().strip('\'"')
def find_gets(path):
config_options = {}
for f in os.listdir(path):
if not f.endswith('.py'):
continue
with open(os.path.join(path, f), 'r') as f_handle:
lines = f_handle.readlines()
for l in lines:
m = get_rg.search(l)
if m:
if f not in config_options:
config_options[f] = {}
config_options[f][clean_string(m.group(2))] = clean_string(m.group(3))
pprint(config_options)
if __name__ == '__main__':
for p in ['pyfibot', 'pyfibot/modules', 'pyfibot/modules/available']:
find_gets(os.path.join('.', p))
|
Add util to find config options#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Small script to find variables that can be declared in config...
import os
import re
from pprint import pprint
get_rg = re.compile(r'[^\.](config|settings|network_conf)\.get\((.*?),(.*?)\)')
def clean_string(string):
return string.strip().strip('\'"')
def find_gets(path):
config_options = {}
for f in os.listdir(path):
if not f.endswith('.py'):
continue
with open(os.path.join(path, f), 'r') as f_handle:
lines = f_handle.readlines()
for l in lines:
m = get_rg.search(l)
if m:
if f not in config_options:
config_options[f] = {}
config_options[f][clean_string(m.group(2))] = clean_string(m.group(3))
pprint(config_options)
if __name__ == '__main__':
for p in ['pyfibot', 'pyfibot/modules', 'pyfibot/modules/available']:
find_gets(os.path.join('.', p))
|
<commit_before><commit_msg>Add util to find config options<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Small script to find variables that can be declared in config...
import os
import re
from pprint import pprint
get_rg = re.compile(r'[^\.](config|settings|network_conf)\.get\((.*?),(.*?)\)')
def clean_string(string):
return string.strip().strip('\'"')
def find_gets(path):
config_options = {}
for f in os.listdir(path):
if not f.endswith('.py'):
continue
with open(os.path.join(path, f), 'r') as f_handle:
lines = f_handle.readlines()
for l in lines:
m = get_rg.search(l)
if m:
if f not in config_options:
config_options[f] = {}
config_options[f][clean_string(m.group(2))] = clean_string(m.group(3))
pprint(config_options)
if __name__ == '__main__':
for p in ['pyfibot', 'pyfibot/modules', 'pyfibot/modules/available']:
find_gets(os.path.join('.', p))
|
|
b4421d0fa5a4c8dd509513553939958ee616d46a
|
msibi/utils/plot_fit.py
|
msibi/utils/plot_fit.py
|
import os.path
def plot_pair_fits(pair, fits, use_agg=False):
if use_agg:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for state, fit in fits[pair].iteritems():
ax.plot(fit, label=state)
ax.set_xlabel('step')
ax.set_ylabel('relative fit')
ax.legend(loc='best')
    ax.set_title(pair)
fig.tight_layout()
fig.savefig('figures/%s-fit.pdf' % pair)
plt.close('all')
def find_fits(filename):
# errs is a dict with keys 'type1-type2' for the pairs
# the values are dicts, with the keys the state name and the values the
# list of fit values at that state
fits = {}
for line in open(filename, 'r'):
try:
keyword = line.split()[1]
except IndexError:
            continue  # skip lines too short to contain a keyword
if keyword == 'pair':
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]].append(line.split()[-1])
except KeyError: # pair not in fits
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]] = [line.split()[-1]]
except KeyError: # state not in pairs in fits
fits[line.split()[2][:-1]] = {line.split()[4][:-1]: [line.split()[-1]]}
return fits
def plot_all(filename, use_agg=False):
"""Plot fitness function vs. iteration for each pair at each state
Args
----
filename : str
Name of file from which to read.
use_agg : bool
Use Agg backend if True - may be useful on clusters with no display
Returns
-------
Nothing is returned, but plots are made for each pair.
If the directory './figures' does not exist, it is created, and the figures
are saved in that directory with the name 'type1-type2-fit.pdf'.
    The filename should be where the optimization output was redirected, as the
format is determined by the MSIBI.optimize() function.
"""
fits = find_fits(filename)
if not os.path.exists('figures'):
os.makedirs('figures')
for pair in fits:
        plot_pair_fits(pair, fits, use_agg=use_agg)
|
Add function for plotting fit vs step
|
Add function for plotting fit vs step
A simple function to plot the fitness function vs. iteration from the
logfile generated by MSIBI.optimize().
This may belong in a different location, but putting it here for now.
|
Python
|
mit
|
ctk3b/msibi,mosdef-hub/msibi,mosdef-hub/msibi
|
Add function for plotting fit vs step
A simple function to plot the fitness function vs. iteration from the
logfile generated by MSIBI.optimize().
This may belong in a different location, but putting it here for now.
|
import os.path
def plot_pair_fits(pair, fits, use_agg=False):
if use_agg:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for state, fit in fits[pair].iteritems():
ax.plot(fit, label=state)
ax.set_xlabel('step')
ax.set_ylabel('relative fit')
ax.legend(loc='best')
    ax.set_title(pair)
fig.tight_layout()
fig.savefig('figures/%s-fit.pdf' % pair)
plt.close('all')
def find_fits(filename):
# errs is a dict with keys 'type1-type2' for the pairs
# the values are dicts, with the keys the state name and the values the
# list of fit values at that state
fits = {}
for line in open(filename, 'r'):
try:
keyword = line.split()[1]
except IndexError:
            continue  # skip lines too short to contain a keyword
if keyword == 'pair':
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]].append(line.split()[-1])
except KeyError: # pair not in fits
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]] = [line.split()[-1]]
except KeyError: # state not in pairs in fits
fits[line.split()[2][:-1]] = {line.split()[4][:-1]: [line.split()[-1]]}
return fits
def plot_all(filename, use_agg=False):
"""Plot fitness function vs. iteration for each pair at each state
Args
----
filename : str
Name of file from which to read.
use_agg : bool
Use Agg backend if True - may be useful on clusters with no display
Returns
-------
Nothing is returned, but plots are made for each pair.
If the directory './figures' does not exist, it is created, and the figures
are saved in that directory with the name 'type1-type2-fit.pdf'.
    The filename should be where the optimization output was redirected, as the
format is determined by the MSIBI.optimize() function.
"""
fits = find_fits(filename)
if not os.path.exists('figures'):
os.makedirs('figures')
for pair in fits:
        plot_pair_fits(pair, fits, use_agg=use_agg)
|
<commit_before><commit_msg>Add function for plotting fit vs step
A simple function to plot the fitness function vs. iteration from the
logfile generated by MSIBI.optimize().
This may belong in a different location, but putting it here for now.<commit_after>
|
import os.path
def plot_pair_fits(pair, fits, use_agg=False):
if use_agg:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for state, fit in fits[pair].iteritems():
ax.plot(fit, label=state)
ax.set_xlabel('step')
ax.set_ylabel('relative fit')
ax.legend(loc='best')
    ax.set_title(pair)
fig.tight_layout()
fig.savefig('figures/%s-fit.pdf' % pair)
plt.close('all')
def find_fits(filename):
# errs is a dict with keys 'type1-type2' for the pairs
# the values are dicts, with the keys the state name and the values the
# list of fit values at that state
fits = {}
for line in open(filename, 'r'):
try:
keyword = line.split()[1]
except IndexError:
            continue  # skip lines too short to contain a keyword
if keyword == 'pair':
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]].append(line.split()[-1])
except KeyError: # pair not in fits
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]] = [line.split()[-1]]
except KeyError: # state not in pairs in fits
fits[line.split()[2][:-1]] = {line.split()[4][:-1]: [line.split()[-1]]}
return fits
def plot_all(filename, use_agg=False):
"""Plot fitness function vs. iteration for each pair at each state
Args
----
filename : str
Name of file from which to read.
use_agg : bool
Use Agg backend if True - may be useful on clusters with no display
Returns
-------
Nothing is returned, but plots are made for each pair.
If the directory './figures' does not exist, it is created, and the figures
are saved in that directory with the name 'type1-type2-fit.pdf'.
    The filename should be where the optimization output was redirected, as the
format is determined by the MSIBI.optimize() function.
"""
fits = find_fits(filename)
if not os.path.exists('figures'):
os.makedirs('figures')
for pair in fits:
        plot_pair_fits(pair, fits, use_agg=use_agg)
|
Add function for plotting fit vs step
A simple function to plot the fitness function vs. iteration from the
logfile generated by MSIBI.optimize().
This may belong in a different location, but putting it here for now.import os.path
def plot_pair_fits(pair, fits, use_agg=False):
if use_agg:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for state, fit in fits[pair].iteritems():
ax.plot(fit, label=state)
ax.set_xlabel('step')
ax.set_ylabel('relative fit')
ax.legend(loc='best')
    ax.set_title(pair)
fig.tight_layout()
fig.savefig('figures/%s-fit.pdf' % pair)
plt.close('all')
def find_fits(filename):
# errs is a dict with keys 'type1-type2' for the pairs
# the values are dicts, with the keys the state name and the values the
# list of fit values at that state
fits = {}
for line in open(filename, 'r'):
try:
keyword = line.split()[1]
except IndexError:
            continue  # skip lines too short to contain a keyword
if keyword == 'pair':
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]].append(line.split()[-1])
except KeyError: # pair not in fits
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]] = [line.split()[-1]]
except KeyError: # state not in pairs in fits
fits[line.split()[2][:-1]] = {line.split()[4][:-1]: [line.split()[-1]]}
return fits
def plot_all(filename, use_agg=False):
"""Plot fitness function vs. iteration for each pair at each state
Args
----
filename : str
Name of file from which to read.
use_agg : bool
Use Agg backend if True - may be useful on clusters with no display
Returns
-------
Nothing is returned, but plots are made for each pair.
If the directory './figures' does not exist, it is created, and the figures
are saved in that directory with the name 'type1-type2-fit.pdf'.
    The filename should be where the optimization output was redirected, as the
format is determined by the MSIBI.optimize() function.
"""
fits = find_fits(filename)
if not os.path.exists('figures'):
os.makedirs('figures')
for pair in fits:
        plot_pair_fits(pair, fits, use_agg=use_agg)
|
<commit_before><commit_msg>Add function for plotting fit vs step
A simple function to plot the fitness function vs. iteration from the
logfile generated by MSIBI.optimize().
This may belong in a different location, but putting it here for now.<commit_after>import os.path
def plot_pair_fits(pair, fits, use_agg=False):
if use_agg:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
for state, fit in fits[pair].iteritems():
ax.plot(fit, label=state)
ax.set_xlabel('step')
ax.set_ylabel('relative fit')
ax.legend(loc='best')
    ax.set_title(pair)
fig.tight_layout()
fig.savefig('figures/%s-fit.pdf' % pair)
plt.close('all')
def find_fits(filename):
# errs is a dict with keys 'type1-type2' for the pairs
# the values are dicts, with the keys the state name and the values the
# list of fit values at that state
fits = {}
for line in open(filename, 'r'):
try:
keyword = line.split()[1]
except IndexError:
            continue  # skip lines too short to contain a keyword
if keyword == 'pair':
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]].append(line.split()[-1])
except KeyError: # pair not in fits
try:
fits[line.split()[2][:-1]][line.split()[4][:-1]] = [line.split()[-1]]
except KeyError: # state not in pairs in fits
fits[line.split()[2][:-1]] = {line.split()[4][:-1]: [line.split()[-1]]}
return fits
def plot_all(filename, use_agg=False):
"""Plot fitness function vs. iteration for each pair at each state
Args
----
filename : str
Name of file from which to read.
use_agg : bool
Use Agg backend if True - may be useful on clusters with no display
Returns
-------
Nothing is returned, but plots are made for each pair.
If the directory './figures' does not exist, it is created, and the figures
are saved in that directory with the name 'type1-type2-fit.pdf'.
    The filename should be where the optimization output was redirected, as the
format is determined by the MSIBI.optimize() function.
"""
fits = find_fits(filename)
if not os.path.exists('figures'):
os.makedirs('figures')
for pair in fits:
        plot_pair_fits(pair, fits, use_agg=use_agg)
|
|
efc5fc4c5da18906efb8914364de0ee72fc85a5e
|
scripts/set-artist-streamable.py
|
scripts/set-artist-streamable.py
|
#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class SetArtistStreamable:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def updateAll(self):
"""Sets artists streamable property if they have streamable tracks already in the database"""
self.cursor.execute("SELECT DISTINCT(artist.name) FROM artist INNER JOIN track on artist.name=artist_name WHERE track.streamable = 1")
for artist in self.cursor.fetchall():
name = artist[0]
print "marking %s as streamable... " % name
self.cursor.execute("UPDATE artist SET streamable = 1 WHERE name = %s", (name,))
print "Applying changes... ",
self.conn.commit()
print "done."
if __name__ == '__main__':
sas = SetArtistStreamable()
sas.updateAll()
|
Add script for updating previously imported artists with the streamable property (so we don't have to query tracks to find out who's streamable)
|
Add script for updating previously imported artists with the streamable property (so we don't have to query tracks to find out who's streamable)
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add script for updating previously imported artists with the streamable property (so we don't have to query tracks to find out who's streamable)
|
#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class SetArtistStreamable:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def updateAll(self):
"""Sets artists streamable property if they have streamable tracks already in the database"""
self.cursor.execute("SELECT DISTINCT(artist.name) FROM artist INNER JOIN track on artist.name=artist_name WHERE track.streamable = 1")
for artist in self.cursor.fetchall():
name = artist[0]
print "marking %s as streamable... " % name
self.cursor.execute("UPDATE artist SET streamable = 1 WHERE name = %s", (name,))
print "Applying changes... ",
self.conn.commit()
print "done."
if __name__ == '__main__':
sas = SetArtistStreamable()
sas.updateAll()
|
<commit_before><commit_msg>Add script for updating previously imported artists with the streamable property (so we don't have to query tracks to find out who's streamable)<commit_after>
|
#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class SetArtistStreamable:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def updateAll(self):
"""Sets artists streamable property if they have streamable tracks already in the database"""
self.cursor.execute("SELECT DISTINCT(artist.name) FROM artist INNER JOIN track on artist.name=artist_name WHERE track.streamable = 1")
for artist in self.cursor.fetchall():
name = artist[0]
print "marking %s as streamable... " % name
self.cursor.execute("UPDATE artist SET streamable = 1 WHERE name = %s", (name,))
print "Applying changes... ",
self.conn.commit()
print "done."
if __name__ == '__main__':
sas = SetArtistStreamable()
sas.updateAll()
|
Add script for updating previously imported artists with the streamable property (so we don't have to query tracks to find out who's streamable)#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class SetArtistStreamable:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def updateAll(self):
"""Sets artists streamable property if they have streamable tracks already in the database"""
self.cursor.execute("SELECT DISTINCT(artist.name) FROM artist INNER JOIN track on artist.name=artist_name WHERE track.streamable = 1")
for artist in self.cursor.fetchall():
name = artist[0]
print "marking %s as streamable... " % name
self.cursor.execute("UPDATE artist SET streamable = 1 WHERE name = %s", (name,))
print "Applying changes... ",
self.conn.commit()
print "done."
if __name__ == '__main__':
sas = SetArtistStreamable()
sas.updateAll()
|
<commit_before><commit_msg>Add script for updating previously imported artists with the streamable property (so we don't have to query tracks to find out who's streamable)<commit_after>#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class SetArtistStreamable:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def updateAll(self):
"""Sets artists streamable property if they have streamable tracks already in the database"""
self.cursor.execute("SELECT DISTINCT(artist.name) FROM artist INNER JOIN track on artist.name=artist_name WHERE track.streamable = 1")
for artist in self.cursor.fetchall():
name = artist[0]
print "marking %s as streamable... " % name
self.cursor.execute("UPDATE artist SET streamable = 1 WHERE name = %s", (name,))
print "Applying changes... ",
self.conn.commit()
print "done."
if __name__ == '__main__':
sas = SetArtistStreamable()
sas.updateAll()
|
|
0899c6516bd9bf8fdd4bd973106eaa1ffcf67667
|
pirate_add_shift_recurrence.py
|
pirate_add_shift_recurrence.py
|
#!/usr/bin/python
import sys
import os
from tasklib.task import Task, TaskWarrior
time_attributes = ('wait', 'until', 'scheduled')
def is_new_local_recurrence_child_task(task):
# Do not affect tasks not spun by recurrence
if not task['parent']:
return False
# Newly created recurrence tasks actually have
# modified field copied from the parent, thus
# older than entry field (until their ID is generated)
if (task['modified'] - task['entry']).total_seconds() < 0:
return True
tw = TaskWarrior(data_location=os.path.dirname(os.path.dirname(sys.argv[0])))
def hook_shift_recurrence(task):
if is_new_local_recurrence_child_task(task):
parent = tw.tasks.get(uuid=task['parent'])
parent_due_shift = task['due'] - parent['due']
for attr in time_attributes:
if parent[attr]:
task[attr] = parent[attr] + parent_due_shift
|
Add pirate-enabled version of the hook
|
Add pirate-enabled version of the hook
|
Python
|
mit
|
tbabej/task.shift-recurrence
|
Add pirate-enabled version of the hook
|
#!/usr/bin/python
import sys
import os
from tasklib.task import Task, TaskWarrior
time_attributes = ('wait', 'until', 'scheduled')
def is_new_local_recurrence_child_task(task):
# Do not affect tasks not spun by recurrence
if not task['parent']:
return False
# Newly created recurrence tasks actually have
# modified field copied from the parent, thus
# older than entry field (until their ID is generated)
if (task['modified'] - task['entry']).total_seconds() < 0:
return True
tw = TaskWarrior(data_location=os.path.dirname(os.path.dirname(sys.argv[0])))
def hook_shift_recurrence(task):
if is_new_local_recurrence_child_task(task):
parent = tw.tasks.get(uuid=task['parent'])
parent_due_shift = task['due'] - parent['due']
for attr in time_attributes:
if parent[attr]:
task[attr] = parent[attr] + parent_due_shift
|
<commit_before><commit_msg>Add pirate-enabled version of the hook<commit_after>
|
#!/usr/bin/python
import sys
import os
from tasklib.task import Task, TaskWarrior
time_attributes = ('wait', 'until', 'scheduled')
def is_new_local_recurrence_child_task(task):
# Do not affect tasks not spun by recurrence
if not task['parent']:
return False
# Newly created recurrence tasks actually have
# modified field copied from the parent, thus
# older than entry field (until their ID is generated)
if (task['modified'] - task['entry']).total_seconds() < 0:
return True
tw = TaskWarrior(data_location=os.path.dirname(os.path.dirname(sys.argv[0])))
def hook_shift_recurrence(task):
if is_new_local_recurrence_child_task(task):
parent = tw.tasks.get(uuid=task['parent'])
parent_due_shift = task['due'] - parent['due']
for attr in time_attributes:
if parent[attr]:
task[attr] = parent[attr] + parent_due_shift
|
Add pirate-enabled version of the hook#!/usr/bin/python
import sys
import os
from tasklib.task import Task, TaskWarrior
time_attributes = ('wait', 'until', 'scheduled')
def is_new_local_recurrence_child_task(task):
# Do not affect tasks not spun by recurrence
if not task['parent']:
return False
# Newly created recurrence tasks actually have
# modified field copied from the parent, thus
# older than entry field (until their ID is generated)
if (task['modified'] - task['entry']).total_seconds() < 0:
return True
tw = TaskWarrior(data_location=os.path.dirname(os.path.dirname(sys.argv[0])))
def hook_shift_recurrence(task):
if is_new_local_recurrence_child_task(task):
parent = tw.tasks.get(uuid=task['parent'])
parent_due_shift = task['due'] - parent['due']
for attr in time_attributes:
if parent[attr]:
task[attr] = parent[attr] + parent_due_shift
|
<commit_before><commit_msg>Add pirate-enabled version of the hook<commit_after>#!/usr/bin/python
import sys
import os
from tasklib.task import Task, TaskWarrior
time_attributes = ('wait', 'until', 'scheduled')
def is_new_local_recurrence_child_task(task):
# Do not affect tasks not spun by recurrence
if not task['parent']:
return False
# Newly created recurrence tasks actually have
# modified field copied from the parent, thus
# older than entry field (until their ID is generated)
if (task['modified'] - task['entry']).total_seconds() < 0:
return True
tw = TaskWarrior(data_location=os.path.dirname(os.path.dirname(sys.argv[0])))
def hook_shift_recurrence(task):
if is_new_local_recurrence_child_task(task):
parent = tw.tasks.get(uuid=task['parent'])
parent_due_shift = task['due'] - parent['due']
for attr in time_attributes:
if parent[attr]:
task[attr] = parent[attr] + parent_due_shift
|
|
71cdf03644215ec3032605e6772dc897d0d78b05
|
Sensors/testMouse.py
|
Sensors/testMouse.py
|
from pymouse import PyMouse
import time
# This script demonstrates the possibility to use a mouse as an unbound sensor.
# To do that, the cursor position is brought back to the middle of the screen at each step, and the distances moved by the mouse are integrated.
# This script is intended to be used with a second external mouse.
# This can be achieved using MPX, and inverting the laptop mouse and external mouse.
# Warning: it may be difficult to stop this script with the mouse, so be sure you are able to do it with keyboard command!
cursor = PyMouse()
x = 0
y = 0
screen_size = cursor.screen_size()
cursor.move(screen_size[0]/2, screen_size[1]/2)
while True:
p = cursor.position()
x = x + p[0] - screen_size[0]/2
y = y + p[1] - screen_size[1]/2
    print x, y, p[0] - screen_size[0]/2, p[1] - screen_size[1]/2
cursor.move(screen_size[0]/2, screen_size[1]/ 2)
time.sleep(0.01)
|
Add a test to check if an optical mouse can be used as a course sensor
|
Add a test to check if an optical mouse can be used as a course sensor
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Add a test to check if an optical mouse can be used as a course sensor
|
from pymouse import PyMouse
import time
# This script demonstrates the possibility to use a mouse as an unbound sensor.
# To do that, the cursor position is brought back to the middle of the screen at each step, and the distances moved by the mouse are integrated.
# This script is intended to be used with a second external mouse.
# This can be achieved using MPX, and inverting the laptop mouse and external mouse.
# Warning: it may be difficult to stop this script with the mouse, so be sure you are able to do it with keyboard command!
cursor = PyMouse()
x = 0
y = 0
screen_size = cursor.screen_size()
cursor.move(screen_size[0]/2, screen_size[1]/2)
while True:
p = cursor.position()
x = x + p[0] - screen_size[0]/2
y = y + p[1] - screen_size[1]/2
    print x, y, p[0] - screen_size[0]/2, p[1] - screen_size[1]/2
cursor.move(screen_size[0]/2, screen_size[1]/ 2)
time.sleep(0.01)
|
<commit_before><commit_msg>Add a test to check if an optical mouse can be used as a course sensor<commit_after>
|
from pymouse import PyMouse
import time
# This script demonstrates the possibility to use a mouse as an unbound sensor.
# To do that, the cursor position is brought back to the middle of the screen at each step, and the distances moved by the mouse are integrated.
# This script is intended to be used with a second external mouse.
# This can be achieved using MPX, and inverting the laptop mouse and external mouse.
# Warning: it may be difficult to stop this script with the mouse, so be sure you are able to do it with keyboard command!
cursor = PyMouse()
x = 0
y = 0
screen_size = cursor.screen_size()
cursor.move(screen_size[0]/2, screen_size[1]/2)
while True:
p = cursor.position()
x = x + p[0] - screen_size[0]/2
y = y + p[1] - screen_size[1]/2
    print x, y, p[0] - screen_size[0]/2, p[1] - screen_size[1]/2
cursor.move(screen_size[0]/2, screen_size[1]/ 2)
time.sleep(0.01)
|
Add a test to check if an optical mouse can be used as a course sensorfrom pymouse import PyMouse
import time
# This script demonstrates the possibility to use a mouse as an unbound sensor.
# To do that, the cursor position is brought back to the middle of the screen at each step, and the distances moved by the mouse are integrated.
# This script is intended to be used with a second external mouse.
# This can be achieved using MPX, and inverting the laptop mouse and external mouse.
# Warning: it may be difficult to stop this script with the mouse, so be sure you are able to do it with keyboard command!
cursor = PyMouse()
x = 0
y = 0
screen_size = cursor.screen_size()
cursor.move(screen_size[0]/2, screen_size[1]/2)
while True:
p = cursor.position()
x = x + p[0] - screen_size[0]/2
y = y + p[1] - screen_size[1]/2
    print x, y, p[0] - screen_size[0]/2, p[1] - screen_size[1]/2
cursor.move(screen_size[0]/2, screen_size[1]/ 2)
time.sleep(0.01)
|
<commit_before><commit_msg>Add a test to check if an optical mouse can be used as a course sensor<commit_after>from pymouse import PyMouse
import time
# This script demonstrates the possibility to use a mouse as an unbound sensor.
# To do that, the cursor position is brought back to the middle of the screen at each step, and the distances moved by the mouse are integrated.
# This script is intended to be used with a second external mouse.
# This can be achieved using MPX, and inverting the laptop mouse and external mouse.
# Warning: it may be difficult to stop this script with the mouse, so be sure you are able to do it with keyboard command!
cursor = PyMouse()
x = 0
y = 0
screen_size = cursor.screen_size()
cursor.move(screen_size[0]/2, screen_size[1]/2)
while True:
p = cursor.position()
x = x + p[0] - screen_size[0]/2
y = y + p[1] - screen_size[1]/2
    print x, y, p[0] - screen_size[0]/2, p[1] - screen_size[1]/2
cursor.move(screen_size[0]/2, screen_size[1]/ 2)
time.sleep(0.01)
|
|
5582e074cc68dc7b97d4b33211b74fb4813e2f17
|
Sorting.py
|
Sorting.py
|
# Sorting of numbers
# we will stick to ascending - read it backwards otherwise :P
def InsertionSort(A):
# As A[0] is sorted by itself we need to take the number to be sorted from 1 to n-1
for i in range(1,len(A)):
key = A[i] # Take the next number in unsorted section
for j in range(0,i): # go through the sorted numbers
if(key<A[j]):
A.insert(j,key) # COPY "key" into the right spot
A.pop(i+1) # remove "key" from its initial position
break
response = 1
while(response == 1):
print "Enter how many numbers are to be sorted"
n = int(raw_input())
A = []
print "Enter the "+str(n)+" numbers :-"
for i in range(n):
A.append(float(raw_input()))
print "Enter 1 for Insertion Sort"
choice = int(raw_input())
if(choice == 1):
print "Executing insertion sort on the following numbers"
print A
InsertionSort(A)
print "The sorted numbers are"
print A
else:
print "invalid option"
print "enter 1 for trying with a different set of numbers"
response = int(raw_input())
print "thanks for using my program"
|
Add Insertion Sort for float numbers
|
Add Insertion Sort for float numbers
I am planning to implement all sorting techniques I learn in this script
Also I will try to incorporate time comparison at a later stage.
Signed-off-by: Aditya Prasad <5c5d1de3672d25c1ad997cc5b3c40a32c347e962@gmail.com>
|
Python
|
mit
|
Aditya8795/Python-Scripts
|
Add Insertion Sort for float numbers
I am planning to implement all sorting techniques I learn in this script
Also I will try to incorporate time comparison at a later stage.
Signed-off-by: Aditya Prasad <5c5d1de3672d25c1ad997cc5b3c40a32c347e962@gmail.com>
|
# Sorting of numbers
# we will stick to ascending - read it backwards otherwise :P
def InsertionSort(A):
# As A[0] is sorted by itself we need to take the number to be sorted from 1 to n-1
for i in range(1,len(A)):
key = A[i] # Take the next number in unsorted section
for j in range(0,i): # go through the sorted numbers
if(key<A[j]):
A.insert(j,key) # COPY "key" into the right spot
A.pop(i+1) # remove "key" from its initial position
break
response = 1
while(response == 1):
print "Enter how many numbers are to be sorted"
n = int(raw_input())
A = []
print "Enter the "+str(n)+" numbers :-"
for i in range(n):
A.append(float(raw_input()))
print "Enter 1 for Insertion Sort"
choice = int(raw_input())
if(choice == 1):
print "Executing insertion sort on the following numbers"
print A
InsertionSort(A)
print "The sorted numbers are"
print A
else:
print "invalid option"
print "enter 1 for trying with a different set of numbers"
response = int(raw_input())
print "thanks for using my program"
|
<commit_before><commit_msg>Add Insertion Sort for float numbers
I am planning to implement all sorting techniques I learn in this script
Also I will try to incorporate time comparison at a later stage.
Signed-off-by: Aditya Prasad <5c5d1de3672d25c1ad997cc5b3c40a32c347e962@gmail.com><commit_after>
|
# Sorting of numbers
# we will stick to ascending - read it backwards otherwise :P
def InsertionSort(A):
# As A[0] is sorted by itself we need to take the number to be sorted from 1 to n-1
for i in range(1,len(A)):
key = A[i] # Take the next number in unsorted section
for j in range(0,i): # go through the sorted numbers
if(key<A[j]):
A.insert(j,key) # COPY "key" into the right spot
A.pop(i+1) # remove "key" from its initial position
break
response = 1
while(response == 1):
print "Enter how many numbers are to be sorted"
n = int(raw_input())
A = []
print "Enter the "+str(n)+" numbers :-"
for i in range(n):
A.append(float(raw_input()))
print "Enter 1 for Insertion Sort"
choice = int(raw_input())
if(choice == 1):
print "Executing insertion sort on the following numbers"
print A
InsertionSort(A)
print "The sorted numbers are"
print A
else:
print "invalid option"
print "enter 1 for trying with a different set of numbers"
response = int(raw_input())
print "thanks for using my program"
|
Add Insertion Sort for float numbers
I am planning to implement all sorting techniques I learn in this script
Also I will try to incoporate time comparison at a later stage.
Signed-off-by: Aditya Prasad <5c5d1de3672d25c1ad997cc5b3c40a32c347e962@gmail.com># Sorting of numbers
# we will stick to ascending - read it backwards otherwise :P
def InsertionSort(A):
# As A[0] is sorted by itself we need to take the number to be sorted from 1 to n-1
for i in range(1,len(A)):
key = A[i] # Take the next number in unsorted section
for j in range(0,i): # go through the sorted numbers
if(key<A[j]):
A.insert(j,key) # COPY "key" into the right spot
A.pop(i+1) # remove "key" from its initial position
break
response = 1
while(response == 1):
print "Enter how many numbers are to be sorted"
n = int(raw_input())
A = []
print "Enter the "+str(n)+" numbers :-"
for i in range(n):
A.append(float(raw_input()))
print "Enter 1 for Insertion Sort"
choice = int(raw_input())
if(choice == 1):
print "Executing insertion sort on the following numbers"
print A
InsertionSort(A)
print "The sorted numbers are"
print A
else:
print "invalid option"
print "enter 1 for trying with a different set of numbers"
response = int(raw_input())
print "thanks for using my program"
|
<commit_before><commit_msg>Add Insertion Sort for float numbers
I am planning to implement all sorting techniques I learn in this script
Also I will try to incorporate time comparison at a later stage.
Signed-off-by: Aditya Prasad <5c5d1de3672d25c1ad997cc5b3c40a32c347e962@gmail.com><commit_after># Sorting of numbers
# we will stick to ascending - read it backwards otherwise :P
def InsertionSort(A):
# As A[0] is sorted by itself we need to take the number to be sorted from 1 to n-1
for i in range(1,len(A)):
key = A[i] # Take the next number in unsorted section
for j in range(0,i): # go through the sorted numbers
if(key<A[j]):
A.insert(j,key) # COPY "key" into the right spot
A.pop(i+1) # remove "key" from its initial position
break
response = 1
while(response == 1):
print "Enter how many numbers are to be sorted"
n = int(raw_input())
A = []
print "Enter the "+str(n)+" numbers :-"
for i in range(n):
A.append(float(raw_input()))
print "Enter 1 for Insertion Sort"
choice = int(raw_input())
if(choice == 1):
print "Executing insertion sort on the following numbers"
print A
InsertionSort(A)
print "The sorted numbers are"
print A
else:
print "invalid option"
print "enter 1 for trying with a different set of numbers"
response = int(raw_input())
print "thanks for using my program"
|
|
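A small non-interactive usage sketch for the InsertionSort routine in the record above, assuming only that the function is defined or imported in the same module.
# Sketch: call InsertionSort directly on a list, bypassing the raw_input driver loop.
numbers = [3.5, 1.0, 2.25, -4.0, 2.25]
InsertionSort(numbers)   # sorts in place, ascending
print numbers            # [-4.0, 1.0, 2.25, 2.25, 3.5]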
fcab7caa8bfd8a7bb3735aa6b61c226a9ad6426e
|
exp/alto/tools/filter_width.py
|
exp/alto/tools/filter_width.py
|
#!/usr/bin/env python3
import sys
from nltk.tree import Tree
def sort_nps():
with open(sys.argv[1]) as np_doc:
for line in np_doc:
t = Tree.fromstring(line)
width = len(t)
if width <= 3:
print(line, end = "")
sort_nps()
|
Add script for getting rid of too wide NPs
|
Add script for getting rid of too wide NPs
|
Python
|
mit
|
kornai/4lang,kornai/4lang,kornai/4lang,kornai/4lang
|
Add script for getting rid of too wide NPs
|
#!/usr/bin/env python3
import sys
from nltk.tree import Tree
def sort_nps():
with open(sys.argv[1]) as np_doc:
for line in np_doc:
t = Tree.fromstring(line)
width = len(t)
if width <= 3:
print(line, end = "")
sort_nps()
|
<commit_before><commit_msg>Add script for getting rid of too wide NPs<commit_after>
|
#!/usr/bin/env python3
import sys
from nltk.tree import Tree
def sort_nps():
with open(sys.argv[1]) as np_doc:
for line in np_doc:
t = Tree.fromstring(line)
width = len(t)
if width <= 3:
print(line, end = "")
sort_nps()
|
Add script for getting rid of too wide NPs#!/usr/bin/env python3
import sys
from nltk.tree import Tree
def sort_nps():
with open(sys.argv[1]) as np_doc:
for line in np_doc:
t = Tree.fromstring(line)
width = len(t)
if width <= 3:
print(line, end = "")
sort_nps()
|
<commit_before><commit_msg>Add script for getting rid of too wide NPs<commit_after>#!/usr/bin/env python3
import sys
from nltk.tree import Tree
def sort_nps():
with open(sys.argv[1]) as np_doc:
for line in np_doc:
t = Tree.fromstring(line)
width = len(t)
if width <= 3:
print(line, end = "")
sort_nps()
|
|
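A quick sketch of the width test used by filter_width.py on in-memory examples, so the behaviour is visible without a treebank file; the bracketed sample strings are made up.
# Sketch: len(t) on an nltk Tree counts the root's direct children, i.e. the "width" filtered above.
from nltk.tree import Tree

narrow = Tree.fromstring("(NP (DT the) (JJ wide) (NN tree))")
wide = Tree.fromstring("(NP (DT a) (JJ b) (JJ c) (NN d))")
print(len(narrow), len(wide))   # 3 4 -> only the first tree passes the width <= 3 filter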
9ddd96bbe727f61c41f73d786b5beea9a85482a9
|
project/velkoja/management/commands/check_nordea_overdue.py
|
project/velkoja/management/commands/check_nordea_overdue.py
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.nordeachecker import NordeaOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Check overdue Nordea payments and send emails about them'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = NordeaOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n, t in notified:
print("Notified %s about %s" % (n.email, t))
|
Add management command to run the checks
|
Add management command to run the checks
|
Python
|
mit
|
HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum
|
Add management command to run the checks
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.nordeachecker import NordeaOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Check overdue Nordea payments and send emails about them'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = NordeaOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n, t in notified:
print("Notified %s about %s" % (n.email, t))
|
<commit_before><commit_msg>Add management command to run the checks<commit_after>
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.nordeachecker import NordeaOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Check overdue Nordea payments and send emails about them'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = NordeaOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n, t in notified:
print("Notified %s about %s" % (n.email, t))
|
Add management command to run the checks# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.nordeachecker import NordeaOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Check overdue Nordea payments and send emails about them'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = NordeaOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n, t in notified:
print("Notified %s about %s" % (n.email, t))
|
<commit_before><commit_msg>Add management command to run the checks<commit_after># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from velkoja.nordeachecker import NordeaOverdueInvoicesHandler
class Command(BaseCommand):
help = 'Check overdue Nordea payments and send emails about them'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
handler = NordeaOverdueInvoicesHandler()
notified = handler.process_overdue(send=True)
if options['verbosity'] > 1:
for n, t in notified:
print("Notified %s about %s" % (n.email, t))
|
|
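A hedged sketch of invoking the new management command from Python (for example inside a test or a scheduled-task wrapper); it assumes a configured Django environment and the standard call_command API.
# Sketch: run check_nordea_overdue programmatically instead of via manage.py.
from django.core.management import call_command

call_command("check_nordea_overdue", verbosity=2)   # verbosity > 1 prints who was notified about what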
20c6e2d41e0848fddb3ff3829720ab43a71f41a9
|
ideascube/conf/kb_babylab_civ.py
|
ideascube/conf/kb_babylab_civ.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
Add conf file for BabyLab KoomBook
|
Add conf file for BabyLab KoomBook
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for BabyLab KoomBook
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
<commit_before><commit_msg>Add conf file for BabyLab KoomBook<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
Add conf file for BabyLab KoomBook# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
<commit_before><commit_msg>Add conf file for BabyLab KoomBook<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'BabyLab'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'khanacademy',
},
{
'id': 'wikistage',
'languages': ['fr']
},
{
'id': 'wikimooc',
'languages': ['fr']
},
{
'id': 'vikidia',
'languages': ['fr']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'e-penser',
'languages': ['fr']
},
{
'id': 'deus-ex-silicium',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikipedia',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
|
8840e38e5d3107564338de4e47bfd65aae5723ee
|
scripts/git-pre-cloner.py
|
scripts/git-pre-cloner.py
|
#!/usr/bin/python
import argparse
import os
import sys
import string
import subprocess
import escapism
safe_chars = set(string.ascii_lowercase + string.digits)
repo = 'https://github.com/data-8/materials-fa17.git'
local_repo = '/export/pool0/homes/_repo'
cwd_tmpl = '/export/pool0/homes/{}'
def safe_username(username):
return escapism.escape(username, safe=safe_chars, escape_char='-').lower()
def home_directory(username):
home_dir = cwd_tmpl.format(username)
if not os.path.exists(home_dir):
os.mkdir(home_dir)
return home_dir
def git_clone():
if os.path.exists(os.path.join(local_repo, repo_dirname)):
return
out = subprocess.check_output(['git', 'clone', args.repo],
cwd=local_repo).decode('utf-8')
def copy_repo(username):
safe = safe_username(username)
home_dir = home_directory(safe)
source_dir = os.path.join(local_repo, repo_dirname)
dest_dir = os.path.join(home_dir, repo_dirname)
if os.path.exists(dest_dir):
if args.verbose: print('Skipping {}'.format(safe))
else:
if args.verbose: print(safe)
out = subprocess.check_output(['cp', '-a', source_dir, dest_dir])
# main
parser = argparse.ArgumentParser(description='Pre-clone course assets.')
parser.add_argument('-f', dest='filename', required=True,
help='File containing user emails')
parser.add_argument('-r', dest='repo', default=repo,
help='Course asset repo')
parser.add_argument('-v', dest='verbose', action='store_true',
help='Be verbose.')
args = parser.parse_args()
repo_dirname = os.path.basename(args.repo).split('.')[0]
if not os.path.exists(local_repo):
os.mkdir(local_repo)
git_clone()
f = open(args.filename)
line = f.readline()
while line != '':
email = line.strip()
if '@berkeley.edu' not in email: continue # just in case
username = email.split('@')[0]
copy_repo(username)
line = f.readline()
# vim: set et ts=4 sw=4:
|
Rename pre-pull and move to scripts/.
|
Rename pre-pull and move to scripts/.
|
Python
|
bsd-3-clause
|
berkeley-dsep-infra/datahub,gunjanbaid/datahub,ryanlovett/datahub,ryanlovett/datahub,ryanlovett/datahub,berkeley-dsep-infra/datahub,berkeley-dsep-infra/datahub,gunjanbaid/datahub
|
Rename pre-pull and move to scripts/.
|
#!/usr/bin/python
import argparse
import os
import sys
import string
import subprocess
import escapism
safe_chars = set(string.ascii_lowercase + string.digits)
repo = 'https://github.com/data-8/materials-fa17.git'
local_repo = '/export/pool0/homes/_repo'
cwd_tmpl = '/export/pool0/homes/{}'
def safe_username(username):
return escapism.escape(username, safe=safe_chars, escape_char='-').lower()
def home_directory(username):
home_dir = cwd_tmpl.format(username)
if not os.path.exists(home_dir):
os.mkdir(home_dir)
return home_dir
def git_clone():
if os.path.exists(os.path.join(local_repo, repo_dirname)):
return
out = subprocess.check_output(['git', 'clone', args.repo],
cwd=local_repo).decode('utf-8')
def copy_repo(username):
safe = safe_username(username)
home_dir = home_directory(safe)
source_dir = os.path.join(local_repo, repo_dirname)
dest_dir = os.path.join(home_dir, repo_dirname)
if os.path.exists(dest_dir):
if args.verbose: print('Skipping {}'.format(safe))
else:
if args.verbose: print(safe)
out = subprocess.check_output(['cp', '-a', source_dir, dest_dir])
# main
parser = argparse.ArgumentParser(description='Pre-clone course assets.')
parser.add_argument('-f', dest='filename', required=True,
help='File containing user emails')
parser.add_argument('-r', dest='repo', default=repo,
help='Course asset repo')
parser.add_argument('-v', dest='verbose', action='store_true',
help='Be verbose.')
args = parser.parse_args()
repo_dirname = os.path.basename(args.repo).split('.')[0]
if not os.path.exists(local_repo):
os.mkdir(local_repo)
git_clone()
f = open(args.filename)
line = f.readline()
while line != '':
email = line.strip()
if '@berkeley.edu' not in email: continue # just in case
username = email.split('@')[0]
copy_repo(username)
line = f.readline()
# vim: set et ts=4 sw=4:
|
<commit_before><commit_msg>Rename pre-pull and move to scripts/.<commit_after>
|
#!/usr/bin/python
import argparse
import os
import sys
import string
import subprocess
import escapism
safe_chars = set(string.ascii_lowercase + string.digits)
repo = 'https://github.com/data-8/materials-fa17.git'
local_repo = '/export/pool0/homes/_repo'
cwd_tmpl = '/export/pool0/homes/{}'
def safe_username(username):
return escapism.escape(username, safe=safe_chars, escape_char='-').lower()
def home_directory(username):
home_dir = cwd_tmpl.format(username)
if not os.path.exists(home_dir):
os.mkdir(home_dir)
return home_dir
def git_clone():
if os.path.exists(os.path.join(local_repo, repo_dirname)):
return
out = subprocess.check_output(['git', 'clone', args.repo],
cwd=local_repo).decode('utf-8')
def copy_repo(username):
safe = safe_username(username)
home_dir = home_directory(safe)
source_dir = os.path.join(local_repo, repo_dirname)
dest_dir = os.path.join(home_dir, repo_dirname)
if os.path.exists(dest_dir):
if args.verbose: print('Skipping {}'.format(safe))
else:
if args.verbose: print(safe)
out = subprocess.check_output(['cp', '-a', source_dir, dest_dir])
# main
parser = argparse.ArgumentParser(description='Pre-clone course assets.')
parser.add_argument('-f', dest='filename', required=True,
help='File containing user emails')
parser.add_argument('-r', dest='repo', default=repo,
help='Course asset repo')
parser.add_argument('-v', dest='verbose', action='store_true',
help='Be verbose.')
args = parser.parse_args()
repo_dirname = os.path.basename(args.repo).split('.')[0]
if not os.path.exists(local_repo):
os.mkdir(local_repo)
git_clone()
f = open(args.filename)
line = f.readline()
while line != '':
email = line.strip()
if '@berkeley.edu' not in email: continue # just in case
username = email.split('@')[0]
copy_repo(username)
line = f.readline()
# vim: set et ts=4 sw=4:
|
Rename pre-pull and move to scripts/.#!/usr/bin/python
import argparse
import os
import sys
import string
import subprocess
import escapism
safe_chars = set(string.ascii_lowercase + string.digits)
repo = 'https://github.com/data-8/materials-fa17.git'
local_repo = '/export/pool0/homes/_repo'
cwd_tmpl = '/export/pool0/homes/{}'
def safe_username(username):
return escapism.escape(username, safe=safe_chars, escape_char='-').lower()
def home_directory(username):
home_dir = cwd_tmpl.format(username)
if not os.path.exists(home_dir):
os.mkdir(home_dir)
return home_dir
def git_clone():
if os.path.exists(os.path.join(local_repo, repo_dirname)):
return
out = subprocess.check_output(['git', 'clone', args.repo],
cwd=local_repo).decode('utf-8')
def copy_repo(username):
safe = safe_username(username)
home_dir = home_directory(safe)
source_dir = os.path.join(local_repo, repo_dirname)
dest_dir = os.path.join(home_dir, repo_dirname)
if os.path.exists(dest_dir):
if args.verbose: print('Skipping {}'.format(safe))
else:
if args.verbose: print(safe)
out = subprocess.check_output(['cp', '-a', source_dir, dest_dir])
# main
parser = argparse.ArgumentParser(description='Pre-clone course assets.')
parser.add_argument('-f', dest='filename', required=True,
help='File containing user emails')
parser.add_argument('-r', dest='repo', default=repo,
help='Course asset repo')
parser.add_argument('-v', dest='verbose', action='store_true',
help='Be verbose.')
args = parser.parse_args()
repo_dirname = os.path.basename(args.repo).split('.')[0]
if not os.path.exists(local_repo):
os.mkdir(local_repo)
git_clone()
f = open(args.filename)
line = f.readline()
while line != '':
email = line.strip()
if '@berkeley.edu' not in email: continue # just in case
username = email.split('@')[0]
copy_repo(username)
line = f.readline()
# vim: set et ts=4 sw=4:
|
<commit_before><commit_msg>Rename pre-pull and move to scripts/.<commit_after>#!/usr/bin/python
import argparse
import os
import sys
import string
import subprocess
import escapism
safe_chars = set(string.ascii_lowercase + string.digits)
repo = 'https://github.com/data-8/materials-fa17.git'
local_repo = '/export/pool0/homes/_repo'
cwd_tmpl = '/export/pool0/homes/{}'
def safe_username(username):
return escapism.escape(username, safe=safe_chars, escape_char='-').lower()
def home_directory(username):
home_dir = cwd_tmpl.format(username)
if not os.path.exists(home_dir):
os.mkdir(home_dir)
return home_dir
def git_clone():
if os.path.exists(os.path.join(local_repo, repo_dirname)):
return
out = subprocess.check_output(['git', 'clone', args.repo],
cwd=local_repo).decode('utf-8')
def copy_repo(username):
safe = safe_username(username)
home_dir = home_directory(safe)
source_dir = os.path.join(local_repo, repo_dirname)
dest_dir = os.path.join(home_dir, repo_dirname)
if os.path.exists(dest_dir):
if args.verbose: print('Skipping {}'.format(safe))
else:
if args.verbose: print(safe)
out = subprocess.check_output(['cp', '-a', source_dir, dest_dir])
# main
parser = argparse.ArgumentParser(description='Pre-clone course assets.')
parser.add_argument('-f', dest='filename', required=True,
help='File containing user emails')
parser.add_argument('-r', dest='repo', default=repo,
help='Course asset repo')
parser.add_argument('-v', dest='verbose', action='store_true',
help='Be verbose.')
args = parser.parse_args()
repo_dirname = os.path.basename(args.repo).split('.')[0]
if not os.path.exists(local_repo):
os.mkdir(local_repo)
git_clone()
f = open(args.filename)
line = f.readline()
while line != '':
email = line.strip()
if '@berkeley.edu' not in email: continue # just in case
username = email.split('@')[0]
copy_repo(username)
line = f.readline()
# vim: set et ts=4 sw=4:
|
|
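One detail worth noting in the record above: the continue inside the while loop skips the trailing readline, so a non-empty line without '@berkeley.edu' would spin forever. A sketch of the same email loop written with direct file iteration, which sidesteps that pitfall:
# Sketch: equivalent per-line processing without manual readline bookkeeping.
with open(args.filename) as f:
    for line in f:
        email = line.strip()
        if '@berkeley.edu' not in email:
            continue   # safe here: the for loop still advances to the next line
        copy_repo(email.split('@')[0])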
27b6b0b62624ccd80345768f664ae2f421f85368
|
h2o-py/tests/testdir_misc/pyunit_mean_per_class_error.py
|
h2o-py/tests/testdir_misc/pyunit_mean_per_class_error.py
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def pyunit_mean_per_class_error():
gbm = H2OGradientBoostingEstimator(nfolds=3, fold_assignment="Random", seed=1234)
## Binomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution = "bernoulli"
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
print(gbm)
mpce = gbm.mean_per_class_error([0.5,0.8]) ## different thresholds
assert (abs(mpce[0][1] - 0.004132231404958664) < 1e-5)
assert (abs(mpce[1][1] - 0.021390374331550777) < 1e-5)
## Multinomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution="multinomial"
gbm.train(x=predictors,y=response_col, training_frame=train, validation_frame=valid)
print(gbm)
mpce = gbm.mean_per_class_error(train=True)
assert( mpce == 0 )
mpce = gbm.mean_per_class_error(valid=True)
assert(abs(mpce - 0.207142857143 ) < 1e-5)
mpce = gbm.mean_per_class_error(xval=True)
assert(abs(mpce - 0.350071715433 ) < 1e-5)
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_mean_per_class_error)
else:
pyunit_mean_per_class_error
|
Add extra Python test for mean-per-class error.
|
PUBDEV-2979: Add extra Python test for mean-per-class error.
|
Python
|
apache-2.0
|
spennihana/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,mathemage/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-3,h2oai/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,michalkurka/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,michalkurka/h2o-3
|
PUBDEV-2979: Add extra Python test for mean-per-class error.
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def pyunit_mean_per_class_error():
gbm = H2OGradientBoostingEstimator(nfolds=3, fold_assignment="Random", seed=1234)
## Binomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution = "bernoulli"
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
print(gbm)
mpce = gbm.mean_per_class_error([0.5,0.8]) ## different thresholds
assert (abs(mpce[0][1] - 0.004132231404958664) < 1e-5)
assert (abs(mpce[1][1] - 0.021390374331550777) < 1e-5)
## Multinomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution="multinomial"
gbm.train(x=predictors,y=response_col, training_frame=train, validation_frame=valid)
print(gbm)
mpce = gbm.mean_per_class_error(train=True)
assert( mpce == 0 )
mpce = gbm.mean_per_class_error(valid=True)
assert(abs(mpce - 0.207142857143 ) < 1e-5)
mpce = gbm.mean_per_class_error(xval=True)
assert(abs(mpce - 0.350071715433 ) < 1e-5)
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_mean_per_class_error)
else:
pyunit_mean_per_class_error
|
<commit_before><commit_msg>PUBDEV-2979: Add extra Python test for mean-per-class error.<commit_after>
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def pyunit_mean_per_class_error():
gbm = H2OGradientBoostingEstimator(nfolds=3, fold_assignment="Random", seed=1234)
## Binomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution = "bernoulli"
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
print(gbm)
mpce = gbm.mean_per_class_error([0.5,0.8]) ## different thresholds
assert (abs(mpce[0][1] - 0.004132231404958664) < 1e-5)
assert (abs(mpce[1][1] - 0.021390374331550777) < 1e-5)
## Multinomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution="multinomial"
gbm.train(x=predictors,y=response_col, training_frame=train, validation_frame=valid)
print(gbm)
mpce = gbm.mean_per_class_error(train=True)
assert( mpce == 0 )
mpce = gbm.mean_per_class_error(valid=True)
assert(abs(mpce - 0.207142857143 ) < 1e-5)
mpce = gbm.mean_per_class_error(xval=True)
assert(abs(mpce - 0.350071715433 ) < 1e-5)
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_mean_per_class_error)
else:
pyunit_mean_per_class_error
|
PUBDEV-2979: Add extra Python test for mean-per-class error.import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def pyunit_mean_per_class_error():
gbm = H2OGradientBoostingEstimator(nfolds=3, fold_assignment="Random", seed=1234)
## Binomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution = "bernoulli"
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
print(gbm)
mpce = gbm.mean_per_class_error([0.5,0.8]) ## different thresholds
assert (abs(mpce[0][1] - 0.004132231404958664) < 1e-5)
assert (abs(mpce[1][1] - 0.021390374331550777) < 1e-5)
## Multinomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution="multinomial"
gbm.train(x=predictors,y=response_col, training_frame=train, validation_frame=valid)
print(gbm)
mpce = gbm.mean_per_class_error(train=True)
assert( mpce == 0 )
mpce = gbm.mean_per_class_error(valid=True)
assert(abs(mpce - 0.207142857143 ) < 1e-5)
mpce = gbm.mean_per_class_error(xval=True)
assert(abs(mpce - 0.350071715433 ) < 1e-5)
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_mean_per_class_error)
else:
pyunit_mean_per_class_error
|
<commit_before><commit_msg>PUBDEV-2979: Add extra Python test for mean-per-class error.<commit_after>import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def pyunit_mean_per_class_error():
gbm = H2OGradientBoostingEstimator(nfolds=3, fold_assignment="Random", seed=1234)
## Binomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution = "bernoulli"
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
print(gbm)
mpce = gbm.mean_per_class_error([0.5,0.8]) ## different thresholds
assert (abs(mpce[0][1] - 0.004132231404958664) < 1e-5)
assert (abs(mpce[1][1] - 0.021390374331550777) < 1e-5)
## Multinomial
cars = h2o.import_file("/users/arno/h2o-3/smalldata/junit/cars_20mpg.csv")
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif(seed=1234)
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution="multinomial"
gbm.train(x=predictors,y=response_col, training_frame=train, validation_frame=valid)
print(gbm)
mpce = gbm.mean_per_class_error(train=True)
assert( mpce == 0 )
mpce = gbm.mean_per_class_error(valid=True)
assert(abs(mpce - 0.207142857143 ) < 1e-5)
mpce = gbm.mean_per_class_error(xval=True)
assert(abs(mpce - 0.350071715433 ) < 1e-5)
if __name__ == "__main__":
pyunit_utils.standalone_test(pyunit_mean_per_class_error)
else:
pyunit_mean_per_class_error
|
|
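The test above imports cars_20mpg.csv from an absolute /users/arno path, which only resolves on one machine. A hedged sketch of the lookup pattern used elsewhere in the h2o-3 pyunits, assuming the pyunit_utils.locate helper (already imported by the test) behaves as in the rest of the suite:
# Sketch: resolve the dataset relative to the h2o-3 checkout instead of a hard-coded home directory.
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))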
bcd45f883b6442f7a1205979bdc79c69ccd2a623
|
migrations/versions/4d5d239d53b4_set_on_delete_cascad.py
|
migrations/versions/4d5d239d53b4_set_on_delete_cascad.py
|
"""Set ON DELETE CASCADE on TestSuite.*
Revision ID: 4d5d239d53b4
Revises: 501983249c94
Create Date: 2013-12-23 16:14:08.812850
"""
# revision identifiers, used by Alembic.
revision = '4d5d239d53b4'
down_revision = '501983249c94'
from alembic import op
def upgrade():
op.drop_constraint('testsuite_project_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_project_id_fkey', 'testsuite', 'project', ['project_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('testsuite_build_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_build_id_fkey', 'testsuite', 'build', ['build_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
Add missing TestSuite ON DELETE UPDATE constraints
|
Add missing TestSuite ON DELETE UPDATE constraints
|
Python
|
apache-2.0
|
wfxiang08/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,bowlofstew/changes,wfxiang08/changes
|
Add missing TestSuite ON DELETE UPDATE constraints
|
"""Set ON DELETE CASCADE on TestSuite.*
Revision ID: 4d5d239d53b4
Revises: 501983249c94
Create Date: 2013-12-23 16:14:08.812850
"""
# revision identifiers, used by Alembic.
revision = '4d5d239d53b4'
down_revision = '501983249c94'
from alembic import op
def upgrade():
op.drop_constraint('testsuite_project_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_project_id_fkey', 'testsuite', 'project', ['project_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('testsuite_build_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_build_id_fkey', 'testsuite', 'build', ['build_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
<commit_before><commit_msg>Add missing TestSuite ON DELETE UPDATE constraints<commit_after>
|
"""Set ON DELETE CASCADE on TestSuite.*
Revision ID: 4d5d239d53b4
Revises: 501983249c94
Create Date: 2013-12-23 16:14:08.812850
"""
# revision identifiers, used by Alembic.
revision = '4d5d239d53b4'
down_revision = '501983249c94'
from alembic import op
def upgrade():
op.drop_constraint('testsuite_project_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_project_id_fkey', 'testsuite', 'project', ['project_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('testsuite_build_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_build_id_fkey', 'testsuite', 'build', ['build_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
Add missing TestSuite ON DELETE UPDATE constraints"""Set ON DELETE CASCADE on TestSuite.*
Revision ID: 4d5d239d53b4
Revises: 501983249c94
Create Date: 2013-12-23 16:14:08.812850
"""
# revision identifiers, used by Alembic.
revision = '4d5d239d53b4'
down_revision = '501983249c94'
from alembic import op
def upgrade():
op.drop_constraint('testsuite_project_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_project_id_fkey', 'testsuite', 'project', ['project_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('testsuite_build_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_build_id_fkey', 'testsuite', 'build', ['build_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
<commit_before><commit_msg>Add missing TestSuite ON DELETE UPDATE constraints<commit_after>"""Set ON DELETE CASCADE on TestSuite.*
Revision ID: 4d5d239d53b4
Revises: 501983249c94
Create Date: 2013-12-23 16:14:08.812850
"""
# revision identifiers, used by Alembic.
revision = '4d5d239d53b4'
down_revision = '501983249c94'
from alembic import op
def upgrade():
op.drop_constraint('testsuite_project_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_project_id_fkey', 'testsuite', 'project', ['project_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('testsuite_build_id_fkey', 'testsuite')
op.create_foreign_key('testsuite_build_id_fkey', 'testsuite', 'build', ['build_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
|
|
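The migration above leaves downgrade() as a no-op. A hedged sketch of a symmetric downgrade that restores plain (non-cascading) foreign keys, on the assumption that the original constraints carried no ON DELETE clause:
# Sketch: recreate the TestSuite foreign keys without ON DELETE CASCADE.
def downgrade():
    op.drop_constraint('testsuite_project_id_fkey', 'testsuite')
    op.create_foreign_key('testsuite_project_id_fkey', 'testsuite', 'project', ['project_id'], ['id'])
    op.drop_constraint('testsuite_build_id_fkey', 'testsuite')
    op.create_foreign_key('testsuite_build_id_fkey', 'testsuite', 'build', ['build_id'], ['id'])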
82f72fe184ecb37049ec7b7b13a8e9c05936ee7a
|
migrations/versions/5981b26ae993_drop_trackman_tables.py
|
migrations/versions/5981b26ae993_drop_trackman_tables.py
|
"""Drop Trackman tables
Revision ID: 5981b26ae993
Revises: 804fb3dc434f
Create Date: 2018-05-19 23:57:42.897891
"""
# revision identifiers, used by Alembic.
revision = '5981b26ae993'
down_revision = '804fb3dc434f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table('air_log')
op.drop_table('tracklog')
op.drop_table('trackreport')
op.drop_table('track')
op.drop_table('djset')
op.drop_table('dj')
op.drop_table('rotation')
def downgrade():
raise Exception("Downgrade to previous versions is unsupported.")
|
Add migration to drop Trackman tables
|
Add migration to drop Trackman tables
|
Python
|
agpl-3.0
|
wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site,wuvt/wuvt-site
|
Add migration to drop Trackman tables
|
"""Drop Trackman tables
Revision ID: 5981b26ae993
Revises: 804fb3dc434f
Create Date: 2018-05-19 23:57:42.897891
"""
# revision identifiers, used by Alembic.
revision = '5981b26ae993'
down_revision = '804fb3dc434f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table('air_log')
op.drop_table('tracklog')
op.drop_table('trackreport')
op.drop_table('track')
op.drop_table('djset')
op.drop_table('dj')
op.drop_table('rotation')
def downgrade():
raise Exception("Downgrade to previous versions is unsupported.")
|
<commit_before><commit_msg>Add migration to drop Trackman tables<commit_after>
|
"""Drop Trackman tables
Revision ID: 5981b26ae993
Revises: 804fb3dc434f
Create Date: 2018-05-19 23:57:42.897891
"""
# revision identifiers, used by Alembic.
revision = '5981b26ae993'
down_revision = '804fb3dc434f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table('air_log')
op.drop_table('tracklog')
op.drop_table('trackreport')
op.drop_table('track')
op.drop_table('djset')
op.drop_table('dj')
op.drop_table('rotation')
def downgrade():
raise Exception("Downgrade to previous versions is unsupported.")
|
Add migration to drop Trackman tables"""Drop Trackman tables
Revision ID: 5981b26ae993
Revises: 804fb3dc434f
Create Date: 2018-05-19 23:57:42.897891
"""
# revision identifiers, used by Alembic.
revision = '5981b26ae993'
down_revision = '804fb3dc434f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table('air_log')
op.drop_table('tracklog')
op.drop_table('trackreport')
op.drop_table('track')
op.drop_table('djset')
op.drop_table('dj')
op.drop_table('rotation')
def downgrade():
raise Exception("Downgrade to previous versions is unsupported.")
|
<commit_before><commit_msg>Add migration to drop Trackman tables<commit_after>"""Drop Trackman tables
Revision ID: 5981b26ae993
Revises: 804fb3dc434f
Create Date: 2018-05-19 23:57:42.897891
"""
# revision identifiers, used by Alembic.
revision = '5981b26ae993'
down_revision = '804fb3dc434f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table('air_log')
op.drop_table('tracklog')
op.drop_table('trackreport')
op.drop_table('track')
op.drop_table('djset')
op.drop_table('dj')
op.drop_table('rotation')
def downgrade():
raise Exception("Downgrade to previous versions is unsupported.")
|
|
32a9a608a6489528089b68857965afdc4b7c76cc
|
tests/test_sql_copy.py
|
tests/test_sql_copy.py
|
import os
import pytest
import time
from carto.exceptions import CartoException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
SETUP_QUERIES = [
'DROP TABLE IF EXISTS carto_python_sdk_copy_test',
"""
CREATE TABLE carto_python_sdk_copy_test (
the_geom geometry(Geometry,4326),
name text,
age integer
)
""",
"SELECT CDB_CartodbfyTable(current_schema, 'carto_python_sdk_copy_test')"
]
BATCH_TERMINAL_STATES = ['done', 'failed', 'cancelled', 'unknown']
# Please note the newline characters to delimit rows
TABLE_CONTENTS=[
b'the_geom,name,age\n',
b'SRID=4326;POINT(-126 54),North West,89\n',
b'SRID=4326;POINT(-96 34),South East,99\n',
b'SRID=4326;POINT(-6 -25),Souther Easter,124\n'
]
def test_copyfrom(api_key_auth_client_usr):
# Create a table suitable for testing
batch_client = BatchSQLClient(api_key_auth_client_usr)
job = batch_client.create(SETUP_QUERIES)
while not job['status'] in BATCH_TERMINAL_STATES:
time.sleep(1)
job = batch_client.read(job['job_id'])
assert job['status'] == 'done'
copy_client = CopySQLClient(api_key_auth_client_usr)
query = 'COPY carto_python_sdk_copy_test (the_geom, name, age) FROM stdin WITH (FORMAT csv, HEADER true)'
data = iter(TABLE_CONTENTS)
result = copy_client.copyfrom(query, data)
|
Add test file for COPY client
|
Add test file for COPY client
|
Python
|
bsd-3-clause
|
CartoDB/cartodb-python,CartoDB/carto-python
|
Add test file for COPY client
|
import os
import pytest
import time
from carto.exceptions import CartoException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
SETUP_QUERIES = [
'DROP TABLE IF EXISTS carto_python_sdk_copy_test',
"""
CREATE TABLE carto_python_sdk_copy_test (
the_geom geometry(Geometry,4326),
name text,
age integer
)
""",
"SELECT CDB_CartodbfyTable(current_schema, 'carto_python_sdk_copy_test')"
]
BATCH_TERMINAL_STATES = ['done', 'failed', 'cancelled', 'unknown']
# Please note the newline characters to delimit rows
TABLE_CONTENTS=[
b'the_geom,name,age\n',
b'SRID=4326;POINT(-126 54),North West,89\n',
b'SRID=4326;POINT(-96 34),South East,99\n',
b'SRID=4326;POINT(-6 -25),Souther Easter,124\n'
]
def test_copyfrom(api_key_auth_client_usr):
# Create a table suitable for testing
batch_client = BatchSQLClient(api_key_auth_client_usr)
job = batch_client.create(SETUP_QUERIES)
while not job['status'] in BATCH_TERMINAL_STATES:
time.sleep(1)
job = batch_client.read(job['job_id'])
assert job['status'] == 'done'
copy_client = CopySQLClient(api_key_auth_client_usr)
query = 'COPY carto_python_sdk_copy_test (the_geom, name, age) FROM stdin WITH (FORMAT csv, HEADER true)'
data = iter(TABLE_CONTENTS)
result = copy_client.copyfrom(query, data)
|
<commit_before><commit_msg>Add test file for COPY client<commit_after>
|
import os
import pytest
import time
from carto.exceptions import CartoException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
SETUP_QUERIES = [
'DROP TABLE IF EXISTS carto_python_sdk_copy_test',
"""
CREATE TABLE carto_python_sdk_copy_test (
the_geom geometry(Geometry,4326),
name text,
age integer
)
""",
"SELECT CDB_CartodbfyTable(current_schema, 'carto_python_sdk_copy_test')"
]
BATCH_TERMINAL_STATES = ['done', 'failed', 'cancelled', 'unknown']
# Please note the newline characters to delimit rows
TABLE_CONTENTS=[
b'the_geom,name,age\n',
b'SRID=4326;POINT(-126 54),North West,89\n',
b'SRID=4326;POINT(-96 34),South East,99\n',
b'SRID=4326;POINT(-6 -25),Souther Easter,124\n'
]
def test_copyfrom(api_key_auth_client_usr):
# Create a table suitable for testing
batch_client = BatchSQLClient(api_key_auth_client_usr)
job = batch_client.create(SETUP_QUERIES)
while not job['status'] in BATCH_TERMINAL_STATES:
time.sleep(1)
job = batch_client.read(job['job_id'])
assert job['status'] == 'done'
copy_client = CopySQLClient(api_key_auth_client_usr)
query = 'COPY carto_python_sdk_copy_test (the_geom, name, age) FROM stdin WITH (FORMAT csv, HEADER true)'
data = iter(TABLE_CONTENTS)
result = copy_client.copyfrom(query, data)
|
Add test file for COPY clientimport os
import pytest
import time
from carto.exceptions import CartoException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
SETUP_QUERIES = [
'DROP TABLE IF EXISTS carto_python_sdk_copy_test',
"""
CREATE TABLE carto_python_sdk_copy_test (
the_geom geometry(Geometry,4326),
name text,
age integer
)
""",
"SELECT CDB_CartodbfyTable(current_schema, 'carto_python_sdk_copy_test')"
]
BATCH_TERMINAL_STATES = ['done', 'failed', 'cancelled', 'unknown']
# Please note the newline characters to delimit rows
TABLE_CONTENTS=[
b'the_geom,name,age\n',
b'SRID=4326;POINT(-126 54),North West,89\n',
b'SRID=4326;POINT(-96 34),South East,99\n',
b'SRID=4326;POINT(-6 -25),Souther Easter,124\n'
]
def test_copyfrom(api_key_auth_client_usr):
# Create a table suitable for testing
batch_client = BatchSQLClient(api_key_auth_client_usr)
job = batch_client.create(SETUP_QUERIES)
while not job['status'] in BATCH_TERMINAL_STATES:
time.sleep(1)
job = batch_client.read(job['job_id'])
assert job['status'] == 'done'
copy_client = CopySQLClient(api_key_auth_client_usr)
query = 'COPY carto_python_sdk_copy_test (the_geom, name, age) FROM stdin WITH (FORMAT csv, HEADER true)'
data = iter(TABLE_CONTENTS)
result = copy_client.copyfrom(query, data)
|
<commit_before><commit_msg>Add test file for COPY client<commit_after>import os
import pytest
import time
from carto.exceptions import CartoException
from carto.sql import SQLClient, BatchSQLClient, CopySQLClient
SETUP_QUERIES = [
'DROP TABLE IF EXISTS carto_python_sdk_copy_test',
"""
CREATE TABLE carto_python_sdk_copy_test (
the_geom geometry(Geometry,4326),
name text,
age integer
)
""",
"SELECT CDB_CartodbfyTable(current_schema, 'carto_python_sdk_copy_test')"
]
BATCH_TERMINAL_STATES = ['done', 'failed', 'cancelled', 'unknown']
# Please note the newline characters to delimit rows
TABLE_CONTENTS=[
b'the_geom,name,age\n',
b'SRID=4326;POINT(-126 54),North West,89\n',
b'SRID=4326;POINT(-96 34),South East,99\n',
b'SRID=4326;POINT(-6 -25),Souther Easter,124\n'
]
def test_copyfrom(api_key_auth_client_usr):
# Create a table suitable for testing
batch_client = BatchSQLClient(api_key_auth_client_usr)
job = batch_client.create(SETUP_QUERIES)
while not job['status'] in BATCH_TERMINAL_STATES:
time.sleep(1)
job = batch_client.read(job['job_id'])
assert job['status'] == 'done'
copy_client = CopySQLClient(api_key_auth_client_usr)
query = 'COPY carto_python_sdk_copy_test (the_geom, name, age) FROM stdin WITH (FORMAT csv, HEADER true)'
data = iter(TABLE_CONTENTS)
result = copy_client.copyfrom(query, data)
|
|
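A hedged follow-up for the COPY test above: verifying the inserted row count with the regular SQLClient, assuming its send() method returns the usual CARTO SQL API payload with a 'rows' list.
# Sketch: check that the three data rows actually landed in the table.
sql_client = SQLClient(api_key_auth_client_usr)
res = sql_client.send('SELECT count(*) AS n FROM carto_python_sdk_copy_test')
assert res['rows'][0]['n'] == 3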
801ea808caa4cde2b1cb84f52d5d516d39b30e88
|
tester_alex_10q.py
|
tester_alex_10q.py
|
from __future__ import print_function
import pandas as pd
from bs4 import BeautifulSoup as BSoup
from SecFiling10Q import SecFiling10Q
def areEqual(expect, val, eps = 0.01):
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}.\n".format(expect, val, diff)
assert expect * val >= 0.0, "Values don't have the same sign: expected= {:f}, found= {:f}.\n".format(expect, val)
except BaseException as be:
print(be)
ticker = "ALEX"
all10Qs = pd.read_csv("TestData\\"+ticker.lower()+"_all_10qs.csv", \
dtype={'cik':str, 'conm':str, 'type':str, 'path':str, 'date':str})
testfile = all10Qs[all10Qs.date == "2018-05-10"]
filing = SecFiling10Q(ticker)
filename = filing.download(testfile.cik.iloc[0], testfile.conm.iloc[0], testfile.type.iloc[0], \
testfile.date.iloc[0], testfile.path.iloc[0], downloadPath = "TestData\\")
## Load the data, and proceed if successful.
if filing.load(filename):
print("Verifying EPS")
areEqual(0.71, filing.getEps())
print("Verifying Sales")
areEqual(113.3*1e6, filing.getSales())
print("Verifying ROE")
areEqual(47.3/1320.6, filing.getRoe())
print("Verifying Net Income")
areEqual(47.3*1e6, filing.getNetIncome())
print("Verifying Stockholders' Equity")
areEqual(1320.6*1e6, filing.getStockholdersEquity())
|
Test the SecFiling10Q for an ALEX 10-q filing.
|
Test the SecFiling10Q for an ALEX 10-q filing.
|
Python
|
agpl-3.0
|
cielling/jupyternbs
|
Test the SecFiling10Q for an ALEX 10-q filing.
|
from __future__ import print_function
import pandas as pd
from bs4 import BeautifulSoup as BSoup
from SecFiling10Q import SecFiling10Q
def areEqual(expect, val, eps = 0.01):
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}.\n".format(expect, val, diff)
assert expect * val >= 0.0, "Values don't have the same sign: expected= {:f}, found= {:f}.\n".format(expect, val)
except BaseException as be:
print(be)
ticker = "ALEX"
all10Qs = pd.read_csv("TestData\\"+ticker.lower()+"_all_10qs.csv", \
dtype={'cik':str, 'conm':str, 'type':str, 'path':str, 'date':str})
testfile = all10Qs[all10Qs.date == "2018-05-10"]
filing = SecFiling10Q(ticker)
filename = filing.download(testfile.cik.iloc[0], testfile.conm.iloc[0], testfile.type.iloc[0], \
testfile.date.iloc[0], testfile.path.iloc[0], downloadPath = "TestData\\")
## Load the data, and proceed if successful.
if filing.load(filename):
print("Verifying EPS")
areEqual(0.71, filing.getEps())
print("Verifying Sales")
areEqual(113.3*1e6, filing.getSales())
print("Verifying ROE")
areEqual(47.3/1320.6, filing.getRoe())
print("Verifying Net Income")
areEqual(47.3*1e6, filing.getNetIncome())
print("Verifying Stockholders' Equity")
areEqual(1320.6*1e6, filing.getStockholdersEquity())
|
<commit_before><commit_msg>Test the SecFiling10Q for an ALEX 10-q filing.<commit_after>
|
from __future__ import print_function
import pandas as pd
from bs4 import BeautifulSoup as BSoup
from SecFiling10Q import SecFiling10Q
def areEqual(expect, val, eps = 0.01):
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}.\n".format(expect, val, diff)
assert expect * val >= 0.0, "Values don't have the same sign: expected= {:f}, found= {:f}.\n".format(expect, val)
except BaseException as be:
print(be)
ticker = "ALEX"
all10Qs = pd.read_csv("TestData\\"+ticker.lower()+"_all_10qs.csv", \
dtype={'cik':str, 'conm':str, 'type':str, 'path':str, 'date':str})
testfile = all10Qs[all10Qs.date == "2018-05-10"]
filing = SecFiling10Q(ticker)
filename = filing.download(testfile.cik.iloc[0], testfile.conm.iloc[0], testfile.type.iloc[0], \
testfile.date.iloc[0], testfile.path.iloc[0], downloadPath = "TestData\\")
## Load the data, and proceed if successful.
if filing.load(filename):
print("Verifying EPS")
areEqual(0.71, filing.getEps())
print("Verifying Sales")
areEqual(113.3*1e6, filing.getSales())
print("Verifying ROE")
areEqual(47.3/1320.6, filing.getRoe())
print("Verifying Net Income")
areEqual(47.3*1e6, filing.getNetIncome())
print("Verifying Stockholders' Equity")
areEqual(1320.6*1e6, filing.getStockholdersEquity())
|
Test the SecFiling10Q for an ALEX 10-q filing.from __future__ import print_function
import pandas as pd
from bs4 import BeautifulSoup as BSoup
from SecFiling10Q import SecFiling10Q
def areEqual(expect, val, eps = 0.01):
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}.\n".format(expect, val, diff)
assert expect * val >= 0.0, "Values don't have the same sign: expected= {:f}, found= {:f}.\n".format(expect, val)
except BaseException as be:
print(be)
ticker = "ALEX"
all10Qs = pd.read_csv("TestData\\"+ticker.lower()+"_all_10qs.csv", \
dtype={'cik':str, 'conm':str, 'type':str, 'path':str, 'date':str})
testfile = all10Qs[all10Qs.date == "2018-05-10"]
filing = SecFiling10Q(ticker)
filename = filing.download(testfile.cik.iloc[0], testfile.conm.iloc[0], testfile.type.iloc[0], \
testfile.date.iloc[0], testfile.path.iloc[0], downloadPath = "TestData\\")
## Load the data, and proceed if successful.
if filing.load(filename):
print("Verifying EPS")
areEqual(0.71, filing.getEps())
print("Verifying Sales")
areEqual(113.3*1e6, filing.getSales())
print("Verifying ROE")
areEqual(47.3/1320.6, filing.getRoe())
print("Verifying Net Income")
areEqual(47.3*1e6, filing.getNetIncome())
print("Verifying Stockholders' Equity")
areEqual(1320.6*1e6, filing.getStockholdersEquity())
|
<commit_before><commit_msg>Test the SecFiling10Q for an ALEX 10-q filing.<commit_after>from __future__ import print_function
import pandas as pd
from bs4 import BeautifulSoup as BSoup
from SecFiling10Q import SecFiling10Q
def areEqual(expect, val, eps = 0.01):
try:
diff = abs(float(val) / float(expect) - 1.0)
assert diff < eps, "Values don't match, expected= {:.12f}, found= {:.12f}, diff= {:.12f}.\n".format(expect, val, diff)
assert expect * val >= 0.0, "Values don't have the same sign: expected= {:f}, found= {:f}.\n".format(expect, val)
except BaseException as be:
print(be)
ticker = "ALEX"
all10Qs = pd.read_csv("TestData\\"+ticker.lower()+"_all_10qs.csv", \
dtype={'cik':str, 'conm':str, 'type':str, 'path':str, 'date':str})
testfile = all10Qs[all10Qs.date == "2018-05-10"]
filing = SecFiling10Q(ticker)
filename = filing.download(testfile.cik.iloc[0], testfile.conm.iloc[0], testfile.type.iloc[0], \
testfile.date.iloc[0], testfile.path.iloc[0], downloadPath = "TestData\\")
## Load the data, and proceed if successful.
if filing.load(filename):
print("Verifying EPS")
areEqual(0.71, filing.getEps())
print("Verifying Sales")
areEqual(113.3*1e6, filing.getSales())
print("Verifying ROE")
areEqual(47.3/1320.6, filing.getRoe())
print("Verifying Net Income")
areEqual(47.3*1e6, filing.getNetIncome())
print("Verifying Stockholders' Equity")
areEqual(1320.6*1e6, filing.getStockholdersEquity())
|
|
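The areEqual helper above divides by the expected value, so an expected 0.0 trips a ZeroDivisionError inside the try block rather than a clean assertion message. A small hedged variant that falls back to an absolute comparison near zero; the name and thresholds are illustrative only.
# Sketch: relative comparison with an absolute fallback when the expected value is ~0.
def are_close(expect, val, rel_eps=0.01, abs_eps=1e-9):
    expect, val = float(expect), float(val)
    if abs(expect) < abs_eps:
        return abs(val) < abs_eps
    return abs(val / expect - 1.0) < rel_eps and expect * val >= 0.0
# are_close(0.71, 0.712) -> True, are_close(0.0, 0.0) -> True, are_close(0.0, 0.5) -> False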
244e91021d87a52dc985eedc8f974653f290c4bd
|
migrations/versions/0225_another_letter_org.py
|
migrations/versions/0225_another_letter_org.py
|
"""empty message
Revision ID: 0225_another_letter_org
Revises: 0224_returned_letter_status
"""
# revision identifiers, used by Alembic.
revision = '0225_another_letter_org'
down_revision = '0224_returned_letter_status'
from alembic import op
NEW_ORGANISATIONS = [
('512', 'Vale of Glamorgan'),
('513', 'Rother and Wealden'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add two new letter logos
|
Add two new letter logos
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add two new letter logos
|
"""empty message
Revision ID: 0225_another_letter_org
Revises: 0224_returned_letter_status
"""
# revision identifiers, used by Alembic.
revision = '0225_another_letter_org'
down_revision = '0224_returned_letter_status'
from alembic import op
NEW_ORGANISATIONS = [
('512', 'Vale of Glamorgan'),
('513', 'Rother and Wealden'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add two new letter logos<commit_after>
|
"""empty message
Revision ID: 0225_another_letter_org
Revises: 0224_returned_letter_status
"""
# revision identifiers, used by Alembic.
revision = '0225_another_letter_org'
down_revision = '0224_returned_letter_status'
from alembic import op
NEW_ORGANISATIONS = [
('512', 'Vale of Glamorgan'),
('513', 'Rother and Wealden'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add two new letter logos"""empty message
Revision ID: 0225_another_letter_org
Revises: 0224_returned_letter_status
"""
# revision identifiers, used by Alembic.
revision = '0225_another_letter_org'
down_revision = '0224_returned_letter_status'
from alembic import op
NEW_ORGANISATIONS = [
('512', 'Vale of Glamorgan'),
('513', 'Rother and Wealden'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add two new letter logos<commit_after>"""empty message
Revision ID: 0225_another_letter_org
Revises: 0224_returned_letter_status
"""
# revision identifiers, used by Alembic.
revision = '0225_another_letter_org'
down_revision = '0224_returned_letter_status'
from alembic import op
NEW_ORGANISATIONS = [
('512', 'Vale of Glamorgan'),
('513', 'Rother and Wealden'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
730e8b79a8c5c27751cc429f348932bd3638bd85
|
tests/test_base.py
|
tests/test_base.py
|
"""tests/test_base.py.
Tests the base blox to ensure they provide solid functionality for a Python dom representation
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from blox.dom import TagWithChildren
class TestTagWithChildren(object):
pass
|
Add initial test for base block elements
|
Add initial test for base block elements
|
Python
|
mit
|
timothycrosley/blox,timothycrosley/blox,timothycrosley/blox
|
Add initial test for base block elements
|
"""tests/test_base.py.
Tests the base blox to ensure they provide solid functionality for a Python dom representation
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from blox.dom import TagWithChildren
class TestTagWithChildren(object):
pass
|
<commit_before><commit_msg>Add initial test for base block elements<commit_after>
|
"""tests/test_base.py.
Tests the base blox to ensure they provide solid functionality for a Python dom representation
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from blox.dom import TagWithChildren
class TestTagWithChildren(object):
pass
|
Add initial test for base block elements"""tests/test_base.py.
Tests the base blox to ensure they provide solid functionality for a Python dom representation
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from blox.dom import TagWithChildren
class TestTagWithChildren(object):
pass
|
<commit_before><commit_msg>Add initial test for base block elements<commit_after>"""tests/test_base.py.
Tests the base blox to ensure they provide solid functionality for a Python dom representation
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
from blox.dom import TagWithChildren
class TestTagWithChildren(object):
pass
|
|
d2d4e057b3a1de8f2be917aa5b8b3a4c0f5e1dc5
|
tests/unit/test_execeptions.py
|
tests/unit/test_execeptions.py
|
#!/usr/bin/env python
from butter import eventfd, _eventfd
from pytest import raises
import pytest
import errno
@pytest.mark.parametrize('path,module,func,errno,exception', [
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EINVAL, ValueError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EMFILE, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENFILE, OSError), # errno is different to above
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENODEV, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENOMEM, MemoryError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EHOSTDOWN, ValueError), # errno chosen as unused in our code
])
@pytest.mark.unit
def test_exception(mocker, path, module, func, errno, exception):
# patch the underlying function as exposed by cffi
m = mocker.patch(path)
# -1 forces most of our code to check ffi.errno
m.return_value = -1
# Make the C level errno the val we want
module.ffi.errno = errno
# Call the same function as the user and wait for it to blow up
with raises(exception):
func()
|
Add testing of behavior when syscall fails
|
Add testing of behavior when syscall fails
|
Python
|
bsd-3-clause
|
dasSOZO/python-butter,wdv4758h/butter
|
Add testing of behavior when syscall fails
|
#!/usr/bin/env python
from butter import eventfd, _eventfd
from pytest import raises
import pytest
import errno
@pytest.mark.parametrize('path,module,func,errno,exception', [
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EINVAL, ValueError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EMFILE, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENFILE, OSError), # errno is different to above
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENODEV, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENOMEM, MemoryError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EHOSTDOWN, ValueError), # errno chosen as unused in our code
])
@pytest.mark.unit
def test_exception(mocker, path, module, func, errno, exception):
# patch the underlying function as exposed by cffi
m = mocker.patch(path)
# -1 forces most of our code to check ffi.errno
m.return_value = -1
# Make the C level errno the val we want
module.ffi.errno = errno
# Call the same function as the user and wait for it to blow up
with raises(exception):
func()
|
<commit_before><commit_msg>Add testing of behavior when syscall fails<commit_after>
|
#!/usr/bin/env python
from butter import eventfd, _eventfd
from pytest import raises
import pytest
import errno
@pytest.mark.parametrize('path,module,func,errno,exception', [
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EINVAL, ValueError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EMFILE, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENFILE, OSError), # errno is different to above
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENODEV, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENOMEM, MemoryError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EHOSTDOWN, ValueError), # errno chosen as unused in our code
])
@pytest.mark.unit
def test_exception(mocker, path, module, func, errno, exception):
# patch the underlying function as exposed by cffi
m = mocker.patch(path)
# -1 forces most of our code to check ffi.errno
m.return_value = -1
# Make the C level errno the val we want
module.ffi.errno = errno
# Call the same function as the user and wait for it to blow up
with raises(exception):
func()
|
Add testing of behavior when syscall fails#!/usr/bin/env python
from butter import eventfd, _eventfd
from pytest import raises
import pytest
import errno
@pytest.mark.parametrize('path,module,func,errno,exception', [
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EINVAL, ValueError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EMFILE, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENFILE, OSError), # errno is different to above
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENODEV, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENOMEM, MemoryError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EHOSTDOWN, ValueError), # errno chosen as unused in our code
])
@pytest.mark.unit
def test_exception(mocker, path, module, func, errno, exception):
# patch the underlying function as exposed by cffi
m = mocker.patch(path)
# -1 forces most of our code to check ffi.errno
m.return_value = -1
# Make the C level errno the val we want
module.ffi.errno = errno
# Call the same function as the user and wait for it to blow up
with raises(exception):
func()
|
<commit_before><commit_msg>Add testing of behavior when syscall fails<commit_after>#!/usr/bin/env python
from butter import eventfd, _eventfd
from pytest import raises
import pytest
import errno
@pytest.mark.parametrize('path,module,func,errno,exception', [
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EINVAL, ValueError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EMFILE, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENFILE, OSError), # errno is different to above
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENODEV, OSError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.ENOMEM, MemoryError),
('butter._eventfd.C.eventfd', _eventfd, _eventfd.eventfd, errno.EHOSTDOWN, ValueError), # errno chosen as unused in our code
])
@pytest.mark.unit
def test_exception(mocker, path, module, func, errno, exception):
# patch the underlying function as exposed by cffi
m = mocker.patch(path)
# -1 forces most of our code to check ffi.errno
m.return_value = -1
# Make the C level errno the val we want
module.ffi.errno = errno
# Call the same function as the user and wait for it to blow up
with raises(exception):
func()
|
|
612d2e3f749244c00404d97d93982c88538ce8aa
|
mk_json_query.py
|
mk_json_query.py
|
#python mk_json_query.py & execute it in the background
# execute ES query and export it to apache directory for data visualization
import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import os.path
import time
import json
#JSON export for further data visualization
FILE_PATH_JSON1 = "/var/www/cache_info1.json"
#ES information
ES_HOST = {
"host" : "10.0.0.158",
"port" : 9200
}
INDEX_NAME = 'ats'
TYPE_NAME = 'accesslog'
POLL_INTERVAL = 10 #10 seconds
mBody = {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"range": {
"accessTime": {
"gte": "now-1d/d"
}
}
}
}
},
"size": 0,
"aggregations": {
"accessTime_1h": {
"date_histogram": {
"field": "accessTime",
"interval": "1h",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
},
"accessTime_5m": {
"date_histogram": {
"field": "accessTime",
"interval": "5m",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
}
}
}
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
def exportInfo(mBody,filePath):
#print (mBody)
res = es.search(index = INDEX_NAME, size=0, body = mBody)
#print(res)
f = open(filePath,'w') #clear the contents
f.write(json.dumps(res))
f.close()
#Main function
if __name__ == '__main__':
while (True):
exportInfo(mBody,FILE_PATH_JSON1)
time.sleep(POLL_INTERVAL)
|
Add export data into apache folder script
|
Add export data into apache folder script
|
Python
|
mit
|
yuecong/tools,yuecong/tools,yuecong/tools,yuecong/tools
|
Add export data into apache folder script
|
#python mk_json_query.py & execute it in the background
# execute ES query and export it to apache directory for data visualization
import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import os.path
import time
import json
#JSON export for further data visualization
FILE_PATH_JSON1 = "/var/www/cache_info1.json"
#ES information
ES_HOST = {
"host" : "10.0.0.158",
"port" : 9200
}
INDEX_NAME = 'ats'
TYPE_NAME = 'accesslog'
POLL_INTERVAL = 10 #10 seconds
mBody = {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"range": {
"accessTime": {
"gte": "now-1d/d"
}
}
}
}
},
"size": 0,
"aggregations": {
"accessTime_1h": {
"date_histogram": {
"field": "accessTime",
"interval": "1h",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
},
"accessTime_5m": {
"date_histogram": {
"field": "accessTime",
"interval": "5m",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
}
}
}
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
def exportInfo(mBody,filePath):
#print (mBody)
res = es.search(index = INDEX_NAME, size=0, body = mBody)
#print(res)
f = open(filePath,'w') #clear the contents
f.write(json.dumps(res))
f.close()
#Main function
if __name__ == '__main__':
while (True):
exportInfo(mBody,FILE_PATH_JSON1)
time.sleep(POLL_INTERVAL)
|
<commit_before><commit_msg>Add export data into apache folder script<commit_after>
|
#python mk_json_query.py & execute it in the background
# execute ES query and export it to apache directory for data visualization
import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import os.path
import time
import json
#JSON export for further data visualization
FILE_PATH_JSON1 = "/var/www/cache_info1.json"
#ES information
ES_HOST = {
"host" : "10.0.0.158",
"port" : 9200
}
INDEX_NAME = 'ats'
TYPE_NAME = 'accesslog'
POLL_INTERVAL = 10 #10 seconds
mBody = {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"range": {
"accessTime": {
"gte": "now-1d/d"
}
}
}
}
},
"size": 0,
"aggregations": {
"accessTime_1h": {
"date_histogram": {
"field": "accessTime",
"interval": "1h",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
},
"accessTime_5m": {
"date_histogram": {
"field": "accessTime",
"interval": "5m",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
}
}
}
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
def exportInfo(mBody,filePath):
#print (mBody)
res = es.search(index = INDEX_NAME, size=0, body = mBody)
#print(res)
f = open(filePath,'w') #clear the contents
f.write(json.dumps(res))
f.close()
#Main function
if __name__ == '__main__':
while (True):
exportInfo(mBody,FILE_PATH_JSON1)
time.sleep(POLL_INTERVAL)
|
Add export data into apache folder script#python mk_json_query.py & execute it in the background
# execute ES query and export it to apache directory for data visualization
import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import os.path
import time
import json
#JSON export for further data visualization
FILE_PATH_JSON1 = "/var/www/cache_info1.json"
#ES information
ES_HOST = {
"host" : "10.0.0.158",
"port" : 9200
}
INDEX_NAME = 'ats'
TYPE_NAME = 'accesslog'
POLL_INTERVAL = 10 #10 seconds
mBody = {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"range": {
"accessTime": {
"gte": "now-1d/d"
}
}
}
}
},
"size": 0,
"aggregations": {
"accessTime_1h": {
"date_histogram": {
"field": "accessTime",
"interval": "1h",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
},
"accessTime_5m": {
"date_histogram": {
"field": "accessTime",
"interval": "5m",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
}
}
}
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
def exportInfo(mBody,filePath):
#print (mBody)
res = es.search(index = INDEX_NAME, size=0, body = mBody)
#print(res)
f = open(filePath,'w') #clear the contents
f.write(json.dumps(res))
f.close()
#Main function
if __name__ == '__main__':
while (True):
exportInfo(mBody,FILE_PATH_JSON1)
time.sleep(POLL_INTERVAL)
|
<commit_before><commit_msg>Add export data into apache folder script<commit_after>#python mk_json_query.py & execute it in the background
# execute ES query and export it to apache directory for data visualization
import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import os.path
import time
import json
#JSON export for further data visualization
FILE_PATH_JSON1 = "/var/www/cache_info1.json"
#ES information
ES_HOST = {
"host" : "10.0.0.158",
"port" : 9200
}
INDEX_NAME = 'ats'
TYPE_NAME = 'accesslog'
POLL_INTERVAL = 10 #10 seconds
mBody = {
"query": {
"filtered": {
"query": {
"match_all": {}
},
"filter": {
"range": {
"accessTime": {
"gte": "now-1d/d"
}
}
}
}
},
"size": 0,
"aggregations": {
"accessTime_1h": {
"date_histogram": {
"field": "accessTime",
"interval": "1h",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
},
"accessTime_5m": {
"date_histogram": {
"field": "accessTime",
"interval": "5m",
"order": {
"_key": "desc"
},
"min_doc_count": 0
},
"aggs": {
"hit_ratio": {
"avg": {
"field": "cacheCode"
}
},
"size_access_info": {
"terms": {
"field": "cacheCode",
"size": 2
},
"aggs": {
"sum_cache_size": {
"sum": {
"field": "contentLength"
}
}
}
}
}
}
}
}
# create ES client, create index
es = Elasticsearch(hosts = [ES_HOST])
def exportInfo(mBody,filePath):
#print (mBody)
res = es.search(index = INDEX_NAME, size=0, body = mBody)
#print(res)
f = open(filePath,'w') #clear the contents
f.write(json.dumps(res))
f.close()
#Main function
if __name__ == '__main__':
while (True):
exportInfo(mBody,FILE_PATH_JSON1)
time.sleep(POLL_INTERVAL)
|
|
637b95a59f740314ea82d8dcced67656d150d8a8
|
concept.py
|
concept.py
|
import docker
import dronekit
# connect to the Docker daemon
client = docker.client.from_env()
print("AAA")
# provision a container from an ArduPilot image
image_name = "squareslab/ardubugs:base"
container = client.containers.create(image_name, "/bin/bash", stdin_open=True, detach=True, working_dir="/experiment/source")
container.start()
print(container.status)
print("BBB")
# build SITL
cmd = "./waf configure"
container.exec_run(cmd)
cmd = "./waf build -j8"
container.exec_run(cmd)
# start the SITL inside a container
model = "rover"
speedup = "1.0"
home = "-35.362938,149.165085,584,270"
cmd = 'build/sitl/bin/ardurover --model "{}" --speedup "{}" --home "{}"'.format(model, speedup, home)
container.exec_run(cmd, detach=True)
print("CCC")
# connect to the SITL from the host via dronekit
#port = 14550
#url = "{}:{}".format(container.ip_address, port)
#dronekit.connect(url, wait_ready=True)
|
Create container, build the source and run ardurover
|
Create container, build the source and run ardurover
|
Python
|
mit
|
squaresLab/Houston,squaresLab/Houston,squaresLab/Houston
|
Create container, build the source and run ardurover
|
import docker
import dronekit
# connect to the Docker daemon
client = docker.client.from_env()
print("AAA")
# provision a container from an ArduPilot image
image_name = "squareslab/ardubugs:base"
container = client.containers.create(image_name, "/bin/bash", stdin_open=True, detach=True, working_dir="/experiment/source")
container.start()
print(container.status)
print("BBB")
# build SITL
cmd = "./waf configure"
container.exec_run(cmd)
cmd = "./waf build -j8"
container.exec_run(cmd)
# start the SITL inside a container
model = "rover"
speedup = "1.0"
home = "-35.362938,149.165085,584,270"
cmd = 'build/sitl/bin/ardurover --model "{}" --speedup "{}" --home "{}"'.format(model, speedup, home)
container.exec_run(cmd, detach=True)
print("CCC")
# connect to the SITL from the host via dronekit
#port = 14550
#url = "{}:{}".format(container.ip_address, port)
#dronekit.connect(url, wait_ready=True)
|
<commit_before><commit_msg>Create container, build the source and run ardurover<commit_after>
|
import docker
import dronekit
# connect to the Docker daemon
client = docker.client.from_env()
print("AAA")
# provision a container from an ArduPilot image
image_name = "squareslab/ardubugs:base"
container = client.containers.create(image_name, "/bin/bash", stdin_open=True, detach=True, working_dir="/experiment/source")
container.start()
print(container.status)
print("BBB")
# build SITL
cmd = "./waf configure"
container.exec_run(cmd)
cmd = "./waf build -j8"
container.exec_run(cmd)
# start the SITL inside a container
model = "rover"
speedup = "1.0"
home = "-35.362938,149.165085,584,270"
cmd = 'build/sitl/bin/ardurover --model "{}" --speedup "{}" --home "{}"'.format(model, speedup, home)
container.exec_run(cmd, detach=True)
print("CCC")
# connect to the SITL from the host via dronekit
#port = 14550
#url = "{}:{}".format(container.ip_address, port)
#dronekit.connect(url, wait_ready=True)
|
Create container, build the source and run arduroverimport docker
import dronekit
# connect to the Docker daemon
client = docker.client.from_env()
print("AAA")
# provision a container from an ArduPilot image
image_name = "squareslab/ardubugs:base"
container = client.containers.create(image_name, "/bin/bash", stdin_open=True, detach=True, working_dir="/experiment/source")
container.start()
print(container.status)
print("BBB")
# build SITL
cmd = "./waf configure"
container.exec_run(cmd)
cmd = "./waf build -j8"
container.exec_run(cmd)
# start the SITL inside a container
model = "rover"
speedup = "1.0"
home = "-35.362938,149.165085,584,270"
cmd = 'build/sitl/bin/ardurover --model "{}" --speedup "{}" --home "{}"'.format(model, speedup, home)
container.exec_run(cmd, detach=True)
print("CCC")
# connect to the SITL from the host via dronekit
#port = 14550
#url = "{}:{}".format(container.ip_address, port)
#dronekit.connect(url, wait_ready=True)
|
<commit_before><commit_msg>Create container, build the source and run ardurover<commit_after>import docker
import dronekit
# connect to the Docker daemon
client = docker.client.from_env()
print("AAA")
# provision a container from an ArduPilot image
image_name = "squareslab/ardubugs:base"
container = client.containers.create(image_name, "/bin/bash", stdin_open=True, detach=True, working_dir="/experiment/source")
container.start()
print(container.status)
print("BBB")
# build SITL
cmd = "./waf configure"
container.exec_run(cmd)
cmd = "./waf build -j8"
container.exec_run(cmd)
# start the SITL inside a container
model = "rover"
speedup = "1.0"
home = "-35.362938,149.165085,584,270"
cmd = 'build/sitl/bin/ardurover --model "{}" --speedup "{}" --home "{}"'.format(model, speedup, home)
container.exec_run(cmd, detach=True)
print("CCC")
# connect to the SITL from the host via dronekit
#port = 14550
#url = "{}:{}".format(container.ip_address, port)
#dronekit.connect(url, wait_ready=True)
|
|
b63dd66a96b503eb10d2eb49162ed4ed99a9e8f2
|
apps/domain/tests/test_core/test_manager/test_env_manager.py
|
apps/domain/tests/test_core/test_manager/test_env_manager.py
|
from src.main.core.database import *
from src.main.core.database.environment.environment import Environment
from src.main.core.database.environment.user_environment import UserEnvironment
from src.main.core.manager import EnvironmentManager, UserManager
from src.main.core.exceptions import InvalidCredentialsError
import pytest
from bcrypt import checkpw
user_role = ("User", False, False, False, False, False, False, False)
admin_role = ("Administrator", True, True, True, True, False, False, True)
@pytest.fixture
def cleanup(database):
yield
try:
database.session.query(User).delete()
database.session.query(Role).delete()
database.session.query(Group).delete()
database.session.query(UserGroup).delete()
database.session.query(Environment).delete()
database.session.query(UserEnvironment).delete()
database.session.commit()
except:
database.session.rollback()
def test_create_env_manager(database, cleanup):
environments = EnvironmentManager(database)
def test_create_user_manager(database, cleanup):
users = UserManager(database)
def test_register_new_environment(database, cleanup):
environment = EnvironmentManager(database)
new_env = environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
assert new_env.address == "http://localhost:5000/"
assert new_env.memory == "32"
assert new_env.instance == "EC2"
assert new_env.gpu == "RTX3070"
def test_query_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.query(address="http://localhost:5000/")[0]
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
def test_first_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.first(address="http://localhost:5000/")
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
|
ADD initial environment unit tests
|
ADD initial environment unit tests
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD initial environment unit tests
|
from src.main.core.database import *
from src.main.core.database.environment.environment import Environment
from src.main.core.database.environment.user_environment import UserEnvironment
from src.main.core.manager import EnvironmentManager, UserManager
from src.main.core.exceptions import InvalidCredentialsError
import pytest
from bcrypt import checkpw
user_role = ("User", False, False, False, False, False, False, False)
admin_role = ("Administrator", True, True, True, True, False, False, True)
@pytest.fixture
def cleanup(database):
yield
try:
database.session.query(User).delete()
database.session.query(Role).delete()
database.session.query(Group).delete()
database.session.query(UserGroup).delete()
database.session.query(Environment).delete()
database.session.query(UserEnvironment).delete()
database.session.commit()
except:
database.session.rollback()
def test_create_env_manager(database, cleanup):
environments = EnvironmentManager(database)
def test_create_user_manager(database, cleanup):
users = UserManager(database)
def test_register_new_environment(database, cleanup):
environment = EnvironmentManager(database)
new_env = environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
assert new_env.address == "http://localhost:5000/"
assert new_env.memory == "32"
assert new_env.instance == "EC2"
assert new_env.gpu == "RTX3070"
def test_query_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.query(address="http://localhost:5000/")[0]
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
def test_first_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.first(address="http://localhost:5000/")
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
|
<commit_before><commit_msg>ADD initial environment unit tests<commit_after>
|
from src.main.core.database import *
from src.main.core.database.environment.environment import Environment
from src.main.core.database.environment.user_environment import UserEnvironment
from src.main.core.manager import EnvironmentManager, UserManager
from src.main.core.exceptions import InvalidCredentialsError
import pytest
from bcrypt import checkpw
user_role = ("User", False, False, False, False, False, False, False)
admin_role = ("Administrator", True, True, True, True, False, False, True)
@pytest.fixture
def cleanup(database):
yield
try:
database.session.query(User).delete()
database.session.query(Role).delete()
database.session.query(Group).delete()
database.session.query(UserGroup).delete()
database.session.query(Environment).delete()
database.session.query(UserEnvironment).delete()
database.session.commit()
except:
database.session.rollback()
def test_create_env_manager(database, cleanup):
environments = EnvironmentManager(database)
def test_create_user_manager(database, cleanup):
users = UserManager(database)
def test_register_new_environment(database, cleanup):
environment = EnvironmentManager(database)
new_env = environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
assert new_env.address == "http://localhost:5000/"
assert new_env.memory == "32"
assert new_env.instance == "EC2"
assert new_env.gpu == "RTX3070"
def test_query_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.query(address="http://localhost:5000/")[0]
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
def test_first_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.first(address="http://localhost:5000/")
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
|
ADD initial environment unit testsfrom src.main.core.database import *
from src.main.core.database.environment.environment import Environment
from src.main.core.database.environment.user_environment import UserEnvironment
from src.main.core.manager import EnvironmentManager, UserManager
from src.main.core.exceptions import InvalidCredentialsError
import pytest
from bcrypt import checkpw
user_role = ("User", False, False, False, False, False, False, False)
admin_role = ("Administrator", True, True, True, True, False, False, True)
@pytest.fixture
def cleanup(database):
yield
try:
database.session.query(User).delete()
database.session.query(Role).delete()
database.session.query(Group).delete()
database.session.query(UserGroup).delete()
database.session.query(Environment).delete()
database.session.query(UserEnvironment).delete()
database.session.commit()
except:
database.session.rollback()
def test_create_env_manager(database, cleanup):
environments = EnvironmentManager(database)
def test_create_user_manager(database, cleanup):
users = UserManager(database)
def test_register_new_environment(database, cleanup):
environment = EnvironmentManager(database)
new_env = environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
assert new_env.address == "http://localhost:5000/"
assert new_env.memory == "32"
assert new_env.instance == "EC2"
assert new_env.gpu == "RTX3070"
def test_query_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.query(address="http://localhost:5000/")[0]
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
def test_first_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.first(address="http://localhost:5000/")
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
|
<commit_before><commit_msg>ADD initial environment unit tests<commit_after>from src.main.core.database import *
from src.main.core.database.environment.environment import Environment
from src.main.core.database.environment.user_environment import UserEnvironment
from src.main.core.manager import EnvironmentManager, UserManager
from src.main.core.exceptions import InvalidCredentialsError
import pytest
from bcrypt import checkpw
user_role = ("User", False, False, False, False, False, False, False)
admin_role = ("Administrator", True, True, True, True, False, False, True)
@pytest.fixture
def cleanup(database):
yield
try:
database.session.query(User).delete()
database.session.query(Role).delete()
database.session.query(Group).delete()
database.session.query(UserGroup).delete()
database.session.query(Environment).delete()
database.session.query(UserEnvironment).delete()
database.session.commit()
except:
database.session.rollback()
def test_create_env_manager(database, cleanup):
environments = EnvironmentManager(database)
def test_create_user_manager(database, cleanup):
users = UserManager(database)
def test_register_new_environment(database, cleanup):
environment = EnvironmentManager(database)
new_env = environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
assert new_env.address == "http://localhost:5000/"
assert new_env.memory == "32"
assert new_env.instance == "EC2"
assert new_env.gpu == "RTX3070"
def test_query_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.query(address="http://localhost:5000/")[0]
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
def test_first_new_environment(database, cleanup):
environment = EnvironmentManager(database)
environment.register(
address="http://localhost:5000/",
memory="32",
instance="EC2",
gpu="RTX3070",
)
result = environment.first(address="http://localhost:5000/")
assert result.memory == "32"
assert result.instance == "EC2"
assert result.gpu == "RTX3070"
|
|
0a29c11ca7b2769f993a131b6d9df35b33f3538c
|
pyrobus/utils.py
|
pyrobus/utils.py
|
from __future__ import division
from threading import Thread
from time import time, sleep
from math import sin, pi
class Sinus(object):
update_frequency = 25.0
def __init__(self, motor, frequency, amplitude, offset, phase):
self.motor = motor
self.frequency = frequency
self.amplitude = amplitude
self.offset = offset
self.phase = phase
self._running = False
self._t = None
def start(self):
if self._t is not None:
raise EnvironmentError('Sinus already running!')
self._running = True
self._t = Thread(target=self._run)
self._t.start()
def stop(self):
self._running = False
if self._t is not None:
self._t.join()
self._t = None
def _run(self):
t0 = time()
while self._running:
t = time() - t0
pos = self.amplitude * sin(2 * pi * self.frequency * t + (self.phase * pi / 180)) + self.offset
self.motor.target_position = pos
sleep(1 / self.update_frequency)
|
Add a sinus utility for the servo module.
|
Add a sinus utility for the servo module.
|
Python
|
mit
|
pollen/pyrobus
|
Add a sinus utility for the servo module.
|
from __future__ import division
from threading import Thread
from time import time, sleep
from math import sin, pi
class Sinus(object):
update_frequency = 25.0
def __init__(self, motor, frequency, amplitude, offset, phase):
self.motor = motor
self.frequency = frequency
self.amplitude = amplitude
self.offset = offset
self.phase = phase
self._running = False
self._t = None
def start(self):
if self._t is not None:
raise EnvironmentError('Sinus already running!')
self._running = True
self._t = Thread(target=self._run)
self._t.start()
def stop(self):
self._running = False
if self._t is not None:
self._t.join()
self._t = None
def _run(self):
t0 = time()
while self._running:
t = time() - t0
pos = self.amplitude * sin(2 * pi * self.frequency * t + (self.phase * pi / 180)) + self.offset
self.motor.target_position = pos
sleep(1 / self.update_frequency)
|
<commit_before><commit_msg>Add a sinus utility for the servo module.<commit_after>
|
from __future__ import division
from threading import Thread
from time import time, sleep
from math import sin, pi
class Sinus(object):
update_frequency = 25.0
def __init__(self, motor, frequency, amplitude, offset, phase):
self.motor = motor
self.frequency = frequency
self.amplitude = amplitude
self.offset = offset
self.phase = phase
self._running = False
self._t = None
def start(self):
if self._t is not None:
raise EnvironmentError('Sinus already running!')
self._running = True
self._t = Thread(target=self._run)
self._t.start()
def stop(self):
self._running = False
if self._t is not None:
self._t.join()
self._t = None
def _run(self):
t0 = time()
while self._running:
t = time() - t0
pos = self.amplitude * sin(2 * pi * self.frequency * t + (self.phase * pi / 180)) + self.offset
self.motor.target_position = pos
sleep(1 / self.update_frequency)
|
Add a sinus utility for the servo module.from __future__ import division
from threading import Thread
from time import time, sleep
from math import sin, pi
class Sinus(object):
update_frequency = 25.0
def __init__(self, motor, frequency, amplitude, offset, phase):
self.motor = motor
self.frequency = frequency
self.amplitude = amplitude
self.offset = offset
self.phase = phase
self._running = False
self._t = None
def start(self):
if self._t is not None:
raise EnvironmentError('Sinus already running!')
self._running = True
self._t = Thread(target=self._run)
self._t.start()
def stop(self):
self._running = False
if self._t is not None:
self._t.join()
self._t = None
def _run(self):
t0 = time()
while self._running:
t = time() - t0
pos = self.amplitude * sin(2 * pi * self.frequency * t + (self.phase * pi / 180)) + self.offset
self.motor.target_position = pos
sleep(1 / self.update_frequency)
|
<commit_before><commit_msg>Add a sinus utility for the servo module.<commit_after>from __future__ import division
from threading import Thread
from time import time, sleep
from math import sin, pi
class Sinus(object):
update_frequency = 25.0
def __init__(self, motor, frequency, amplitude, offset, phase):
self.motor = motor
self.frequency = frequency
self.amplitude = amplitude
self.offset = offset
self.phase = phase
self._running = False
self._t = None
def start(self):
if self._t is not None:
raise EnvironmentError('Sinus already running!')
self._running = True
self._t = Thread(target=self._run)
self._t.start()
def stop(self):
self._running = False
if self._t is not None:
self._t.join()
self._t = None
def _run(self):
t0 = time()
while self._running:
t = time() - t0
pos = self.amplitude * sin(2 * pi * self.frequency * t + (self.phase * pi / 180)) + self.offset
self.motor.target_position = pos
sleep(1 / self.update_frequency)
|
|
afc9ac087d4d5e31f25542579e5e0ade4d29a3c2
|
day-1-2.py
|
day-1-2.py
|
with open('day-1-input.txt', 'r') as f:
puzzle_input = f.read()
current_floor = 0
current_position = 0
for paren in puzzle_input:
current_position += 1
if paren == '(':
current_floor += 1
elif paren == ')':
current_floor -= 1
if current_floor < 0:
break
print(current_position)
# My answer: 1795
|
Complete day 1 part 2
|
Complete day 1 part 2
|
Python
|
mit
|
foxscotch/advent-of-code,foxscotch/advent-of-code
|
Complete day 1 part 2
|
with open('day-1-input.txt', 'r') as f:
puzzle_input = f.read()
current_floor = 0
current_position = 0
for paren in puzzle_input:
current_position += 1
if paren == '(':
current_floor += 1
elif paren == ')':
current_floor -= 1
if current_floor < 0:
break
print(current_position)
# My answer: 1795
|
<commit_before><commit_msg>Complete day 1 part 2<commit_after>
|
with open('day-1-input.txt', 'r') as f:
puzzle_input = f.read()
current_floor = 0
current_position = 0
for paren in puzzle_input:
current_position += 1
if paren == '(':
current_floor += 1
elif paren == ')':
current_floor -= 1
if current_floor < 0:
break
print(current_position)
# My answer: 1795
|
Complete day 1 part 2with open('day-1-input.txt', 'r') as f:
puzzle_input = f.read()
current_floor = 0
current_position = 0
for paren in puzzle_input:
current_position += 1
if paren == '(':
current_floor += 1
elif paren == ')':
current_floor -= 1
if current_floor < 0:
break
print(current_position)
# My answer: 1795
|
<commit_before><commit_msg>Complete day 1 part 2<commit_after>with open('day-1-input.txt', 'r') as f:
puzzle_input = f.read()
current_floor = 0
current_position = 0
for paren in puzzle_input:
current_position += 1
if paren == '(':
current_floor += 1
elif paren == ')':
current_floor -= 1
if current_floor < 0:
break
print(current_position)
# My answer: 1795
|
|
a455c8f667af831b53b10abf998cbcaef2e2e1dd
|
shuup/testing/migrations/0005_supplierprice.py
|
shuup/testing/migrations/0005_supplierprice.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shuup.utils.properties
import shuup.core.fields
class Migration(migrations.Migration):
dependencies = [
('shuup', '0051_supplier_enabled'),
('shuup_testing', '0004_fieldsmodel'),
]
operations = [
migrations.CreateModel(
name='SupplierPrice',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('amount_value', shuup.core.fields.MoneyValueField(max_digits=36, decimal_places=9)),
('product', models.ForeignKey(to='shuup.Product')),
('shop', models.ForeignKey(to='shuup.Shop')),
('supplier', models.ForeignKey(to='shuup.Supplier')),
],
bases=(shuup.utils.properties.MoneyPropped, models.Model),
),
]
|
Add missing migration for unit tests
|
Add missing migration for unit tests
|
Python
|
agpl-3.0
|
shoopio/shoop,shoopio/shoop,shoopio/shoop
|
Add missing migration for unit tests
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shuup.utils.properties
import shuup.core.fields
class Migration(migrations.Migration):
dependencies = [
('shuup', '0051_supplier_enabled'),
('shuup_testing', '0004_fieldsmodel'),
]
operations = [
migrations.CreateModel(
name='SupplierPrice',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('amount_value', shuup.core.fields.MoneyValueField(max_digits=36, decimal_places=9)),
('product', models.ForeignKey(to='shuup.Product')),
('shop', models.ForeignKey(to='shuup.Shop')),
('supplier', models.ForeignKey(to='shuup.Supplier')),
],
bases=(shuup.utils.properties.MoneyPropped, models.Model),
),
]
|
<commit_before><commit_msg>Add missing migration for unit tests<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shuup.utils.properties
import shuup.core.fields
class Migration(migrations.Migration):
dependencies = [
('shuup', '0051_supplier_enabled'),
('shuup_testing', '0004_fieldsmodel'),
]
operations = [
migrations.CreateModel(
name='SupplierPrice',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('amount_value', shuup.core.fields.MoneyValueField(max_digits=36, decimal_places=9)),
('product', models.ForeignKey(to='shuup.Product')),
('shop', models.ForeignKey(to='shuup.Shop')),
('supplier', models.ForeignKey(to='shuup.Supplier')),
],
bases=(shuup.utils.properties.MoneyPropped, models.Model),
),
]
|
Add missing migration for unit tests# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shuup.utils.properties
import shuup.core.fields
class Migration(migrations.Migration):
dependencies = [
('shuup', '0051_supplier_enabled'),
('shuup_testing', '0004_fieldsmodel'),
]
operations = [
migrations.CreateModel(
name='SupplierPrice',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('amount_value', shuup.core.fields.MoneyValueField(max_digits=36, decimal_places=9)),
('product', models.ForeignKey(to='shuup.Product')),
('shop', models.ForeignKey(to='shuup.Shop')),
('supplier', models.ForeignKey(to='shuup.Supplier')),
],
bases=(shuup.utils.properties.MoneyPropped, models.Model),
),
]
|
<commit_before><commit_msg>Add missing migration for unit tests<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import shuup.utils.properties
import shuup.core.fields
class Migration(migrations.Migration):
dependencies = [
('shuup', '0051_supplier_enabled'),
('shuup_testing', '0004_fieldsmodel'),
]
operations = [
migrations.CreateModel(
name='SupplierPrice',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('amount_value', shuup.core.fields.MoneyValueField(max_digits=36, decimal_places=9)),
('product', models.ForeignKey(to='shuup.Product')),
('shop', models.ForeignKey(to='shuup.Shop')),
('supplier', models.ForeignKey(to='shuup.Supplier')),
],
bases=(shuup.utils.properties.MoneyPropped, models.Model),
),
]
|
|
769d8b85ba9591d5640008dd2812c1db96cd3cef
|
cube_analysis/cube_utils.py
|
cube_analysis/cube_utils.py
|
from spectral_cube import SpectralCube, VaryingResolutionSpectralCube
from astropy.io import fits
import astropy.units as u
import os
from .io_utils import create_huge_fits
from .progressbar import ProgressBar
def convert_K(cube_name, output_folder, is_huge=True, verbose=False):
'''
Convert a larger-than-memory cube from Jy/beam to K
'''
# Load the header from the file
hdr = fits.getheader(cube_name)
# Only need to change BUNIT
new_hdr = hdr.copy()
new_hdr['BUNIT'] = 'K'
spec_shape = hdr['NAXIS3']
# Append K to the cube name
cube_K_name = os.path.join(output_folder,
f"{cube_name.rstrip('.fits')}_K.fits")
if is_huge:
create_huge_fits(cube_K_name, new_hdr, verbose=verbose)
if verbose:
pbar = ProgressBar(spec_shape)
for chan in range(spec_shape):
cube = SpectralCube.read(cube_name, mode='denywrite')
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu[0].data[chan] = cube[chan:chan + 1].to(u.K).unitless_filled_data[:]
cube_K_hdu.flush()
cube_K_hdu.close()
del cube
if verbose:
pbar.update(chan + 1)
# Append a beams table.
orig_cube = fits.open(cube_name, mode='denywrite')
if len(orig_cube) == 2:
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu.append(orig_cube[1])
cube_K_hdu.flush()
cube_K_hdu.close()
orig_cube.close()
else:
cube = SpectralCube.read(cube_name)
cube_K = cube.to(u.K)
cube_K.write(cube_K_name)
|
Add per channel K conversion function
|
Add per channel K conversion function
|
Python
|
mit
|
e-koch/CubeAnalysis
|
Add per channel K conversion function
|
from spectral_cube import SpectralCube, VaryingResolutionSpectralCube
from astropy.io import fits
import astropy.units as u
import os
from .io_utils import create_huge_fits
from .progressbar import ProgressBar
def convert_K(cube_name, output_folder, is_huge=True, verbose=False):
'''
Convert a larger-than-memory cube from Jy/beam to K
'''
# Load the header from the file
hdr = fits.getheader(cube_name)
# Only need to change BUNIT
new_hdr = hdr.copy()
new_hdr['BUNIT'] = 'K'
spec_shape = hdr['NAXIS3']
# Append K to the cube name
cube_K_name = os.path.join(output_folder,
f"{cube_name.rstrip('.fits')}_K.fits")
if is_huge:
create_huge_fits(cube_K_name, new_hdr, verbose=verbose)
if verbose:
pbar = ProgressBar(spec_shape)
for chan in range(spec_shape):
cube = SpectralCube.read(cube_name, mode='denywrite')
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu[0].data[chan] = cube[chan:chan + 1].to(u.K).unitless_filled_data[:]
cube_K_hdu.flush()
cube_K_hdu.close()
del cube
if verbose:
pbar.update(chan + 1)
# Append a beams table.
orig_cube = fits.open(cube_name, mode='denywrite')
if len(orig_cube) == 2:
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu.append(orig_cube[1])
cube_K_hdu.flush()
cube_K_hdu.close()
orig_cube.close()
else:
cube = SpectralCube.read(cube_name)
cube_K = cube.to(u.K)
cube_K.write(cube_K_name)
|
<commit_before><commit_msg>Add per channel K conversion function<commit_after>
|
from spectral_cube import SpectralCube, VaryingResolutionSpectralCube
from astropy.io import fits
import astropy.units as u
import os
from .io_utils import create_huge_fits
from .progressbar import ProgressBar
def convert_K(cube_name, output_folder, is_huge=True, verbose=False):
'''
Convert a larger-than-memory cube from Jy/beam to K
'''
# Load the header from the file
hdr = fits.getheader(cube_name)
# Only need to change BUNIT
new_hdr = hdr.copy()
new_hdr['BUNIT'] = 'K'
spec_shape = hdr['NAXIS3']
# Append K to the cube name
cube_K_name = os.path.join(output_folder,
f"{cube_name.rstrip('.fits')}_K.fits")
if is_huge:
create_huge_fits(cube_K_name, new_hdr, verbose=verbose)
if verbose:
pbar = ProgressBar(spec_shape)
for chan in range(spec_shape):
cube = SpectralCube.read(cube_name, mode='denywrite')
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu[0].data[chan] = cube[chan:chan + 1].to(u.K).unitless_filled_data[:]
cube_K_hdu.flush()
cube_K_hdu.close()
del cube
if verbose:
pbar.update(chan + 1)
# Append a beams table.
orig_cube = fits.open(cube_name, mode='denywrite')
if len(orig_cube) == 2:
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu.append(orig_cube[1])
cube_K_hdu.flush()
cube_K_hdu.close()
orig_cube.close()
else:
cube = SpectralCube.read(cube_name)
cube_K = cube.to(u.K)
cube_K.write(cube_K_name)
|
Add per channel K conversion function
from spectral_cube import SpectralCube, VaryingResolutionSpectralCube
from astropy.io import fits
import astropy.units as u
import os
from .io_utils import create_huge_fits
from .progressbar import ProgressBar
def convert_K(cube_name, output_folder, is_huge=True, verbose=False):
'''
Convert a larger-than-memory cube from Jy/beam to K
'''
# Load the header from the file
hdr = fits.getheader(cube_name)
# Only need to change BUNIT
new_hdr = hdr.copy()
new_hdr['BUNIT'] = 'K'
spec_shape = hdr['NAXIS3']
# Append K to the cube name
cube_K_name = os.path.join(output_folder,
f"{cube_name.rstrip('.fits')}_K.fits")
if is_huge:
create_huge_fits(cube_K_name, new_hdr, verbose=verbose)
if verbose:
pbar = ProgressBar(spec_shape)
for chan in range(spec_shape):
cube = SpectralCube.read(cube_name, mode='denywrite')
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu[0].data[chan] = cube[chan:chan + 1].to(u.K).unitless_filled_data[:]
cube_K_hdu.flush()
cube_K_hdu.close()
del cube
if verbose:
pbar.update(chan + 1)
# Append a beams table.
orig_cube = fits.open(cube_name, mode='denywrite')
if len(orig_cube) == 2:
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu.append(orig_cube[1])
cube_K_hdu.flush()
cube_K_hdu.close()
orig_cube.close()
else:
cube = SpectralCube.read(cube_name)
cube_K = cube.to(u.K)
cube_K.write(cube_K_name)
|
<commit_before><commit_msg>Add per channel K conversion function<commit_after>
from spectral_cube import SpectralCube, VaryingResolutionSpectralCube
from astropy.io import fits
import astropy.units as u
import os
from .io_utils import create_huge_fits
from .progressbar import ProgressBar
def convert_K(cube_name, output_folder, is_huge=True, verbose=False):
'''
Convert a larger-than-memory cube from Jy/beam to K
'''
# Load the header from the file
hdr = fits.getheader(cube_name)
# Only need to change BUNIT
new_hdr = hdr.copy()
new_hdr['BUNIT'] = 'K'
spec_shape = hdr['NAXIS3']
# Append K to the cube name
cube_K_name = os.path.join(output_folder,
f"{cube_name.rstrip('.fits')}_K.fits")
if is_huge:
create_huge_fits(cube_K_name, new_hdr, verbose=verbose)
if verbose:
pbar = ProgressBar(spec_shape)
for chan in range(spec_shape):
cube = SpectralCube.read(cube_name, mode='denywrite')
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu[0].data[chan] = cube[chan:chan + 1].to(u.K).unitless_filled_data[:]
cube_K_hdu.flush()
cube_K_hdu.close()
del cube
if verbose:
pbar.update(chan + 1)
# Append a beams table.
orig_cube = fits.open(cube_name, mode='denywrite')
if len(orig_cube) == 2:
cube_K_hdu = fits.open(cube_K_name, mode='update')
cube_K_hdu.append(orig_cube[1])
cube_K_hdu.flush()
cube_K_hdu.close()
orig_cube.close()
else:
cube = SpectralCube.read(cube_name)
cube_K = cube.to(u.K)
cube_K.write(cube_K_name)
|
|
9ea0dcb8a749461770dfeb7a77c6e3d210afa94e
|
clipped_loss.py
|
clipped_loss.py
|
from chainer import functions as F
def clipped_loss(x, t):
diff = x - t
abs_loss = abs(diff)
squared_loss = diff ** 2
abs_loss = F.expand_dims(abs_loss, 1)
squared_loss = F.expand_dims(squared_loss, 1)
return F.sum(F.min(F.concat((abs_loss, squared_loss), axis=1), axis=1))
|
Move clipped_loss into a separate file
|
Move clipped_loss into a separate file
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Move clipped_loss into a separate file
|
from chainer import functions as F
def clipped_loss(x, t):
diff = x - t
abs_loss = abs(diff)
squared_loss = diff ** 2
abs_loss = F.expand_dims(abs_loss, 1)
squared_loss = F.expand_dims(squared_loss, 1)
return F.sum(F.min(F.concat((abs_loss, squared_loss), axis=1), axis=1))
|
<commit_before><commit_msg>Move clipped_loss into a separate file<commit_after>
|
from chainer import functions as F
def clipped_loss(x, t):
diff = x - t
abs_loss = abs(diff)
squared_loss = diff ** 2
abs_loss = F.expand_dims(abs_loss, 1)
squared_loss = F.expand_dims(squared_loss, 1)
return F.sum(F.min(F.concat((abs_loss, squared_loss), axis=1), axis=1))
|
Move clipped_loss into a separate filefrom chainer import functions as F
def clipped_loss(x, t):
diff = x - t
abs_loss = abs(diff)
squared_loss = diff ** 2
abs_loss = F.expand_dims(abs_loss, 1)
squared_loss = F.expand_dims(squared_loss, 1)
return F.sum(F.min(F.concat((abs_loss, squared_loss), axis=1), axis=1))
|
<commit_before><commit_msg>Move clipped_loss into a separate file<commit_after>from chainer import functions as F
def clipped_loss(x, t):
diff = x - t
abs_loss = abs(diff)
squared_loss = diff ** 2
abs_loss = F.expand_dims(abs_loss, 1)
squared_loss = F.expand_dims(squared_loss, 1)
return F.sum(F.min(F.concat((abs_loss, squared_loss), axis=1), axis=1))
|
|
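The clipped loss above keeps, element by element, whichever of the absolute and squared errors is smaller and sums the result. A small NumPy sketch of the same formula, with illustrative values only:
import numpy as np
diff = np.array([0.2, 1.5, -3.0])                 # x - t for three illustrative samples
loss = np.minimum(np.abs(diff), diff ** 2).sum()  # 0.04 + 1.5 + 3.0 = 4.54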
ac19575d72a78b0c7a6e51a51738209ac7caf26e
|
extract.py
|
extract.py
|
#!/usr/bin/python3
import sys
import os
import subprocess
if len(sys.argv) < 3:
print("Provide reference image and output file.")
exit(1)
reference = sys.argv[1]
output = sys.argv[2]
path = os.getcwd() if len(sys.argv) == 3 else sys.argv[3]
def get_time(path):
result = subprocess.run(["master", "time", path], stdout = subprocess.PIPE)
return float(result.stdout.strip())
def get_errors(path, reference):
result = subprocess.run(["master", "errors", path, reference], stdout = subprocess.PIPE)
return tuple(map(float, result.stdout.strip().split()))
def get_technique(path):
TECHNIQUES = ["BPT", "VCM", "UPG", "PT"]
for technique in TECHNIQUES:
if technique in path:
return technique
return "N/A"
PT = []
BPT = []
VCM = []
UPG = []
images = [entry.path for entry in os.scandir(path) if entry.path.endswith(".exr")]
for image in images:
technique = get_technique(image)
entry = (get_time(image),) + get_errors(image, reference) + (get_technique(image), os.path.basename(image))
print(image)
if technique == "PT":
PT.append(entry)
elif technique == "BPT":
BPT.append(entry)
elif technique == "VCM":
VCM.append(entry)
elif technique == "UPG":
UPG.append(entry)
PT.sort(key = lambda x: x[0])
BPT.sort(key = lambda x: x[0])
VCM.sort(key = lambda x: x[0])
UPG.sort(key = lambda x: x[0])
def write_data(basename, technique, data):
filename, file_extension = os.path.splitext(basename)
file = open(filename + "." + technique + file_extension, "w+")
for entry in data:
file.write("{:16} {:16} {:16} {:6} # {}\n".format(*entry))
write_data(output, "pt", PT)
write_data(output, "bpt", BPT)
write_data(output, "vcm", VCM)
write_data(output, "upg", UPG)
|
Add Python script to make charts.
|
Add Python script to make charts.
|
Python
|
mit
|
ciechowoj/master,ciechowoj/master,ciechowoj/master
|
Add Python script to make charts.
|
#!/usr/bin/python3
import sys
import os
import subprocess
if len(sys.argv) < 3:
print("Provide reference image and output file.")
exit(1)
reference = sys.argv[1]
output = sys.argv[2]
path = os.getcwd() if len(sys.argv) == 3 else sys.argv[3]
def get_time(path):
result = subprocess.run(["master", "time", path], stdout = subprocess.PIPE)
return float(result.stdout.strip())
def get_errors(path, reference):
result = subprocess.run(["master", "errors", path, reference], stdout = subprocess.PIPE)
return tuple(map(float, result.stdout.strip().split()))
def get_technique(path):
TECHNIQUES = ["BPT", "VCM", "UPG", "PT"]
for technique in TECHNIQUES:
if technique in path:
return technique
return "N/A"
PT = []
BPT = []
VCM = []
UPG = []
images = [entry.path for entry in os.scandir(path) if entry.path.endswith(".exr")]
for image in images:
technique = get_technique(image)
entry = (get_time(image),) + get_errors(image, reference) + (get_technique(image), os.path.basename(image))
print(image)
if technique == "PT":
PT.append(entry)
elif technique == "BPT":
BPT.append(entry)
elif technique == "VCM":
VCM.append(entry)
elif technique == "UPG":
UPG.append(entry)
PT.sort(key = lambda x: x[0])
BPT.sort(key = lambda x: x[0])
VCM.sort(key = lambda x: x[0])
UPG.sort(key = lambda x: x[0])
def write_data(basename, technique, data):
filename, file_extension = os.path.splitext(basename)
file = open(filename + "." + technique + file_extension, "w+")
for entry in data:
file.write("{:16} {:16} {:16} {:6} # {}\n".format(*entry))
write_data(output, "pt", PT)
write_data(output, "bpt", BPT)
write_data(output, "vcm", VCM)
write_data(output, "upg", UPG)
|
<commit_before><commit_msg>Add Python script to make charts.<commit_after>
|
#!/usr/bin/python3
import sys
import os
import subprocess
if len(sys.argv) < 3:
print("Provide reference image and output file.")
exit(1)
reference = sys.argv[1]
output = sys.argv[2]
path = os.getcwd() if len(sys.argv) == 3 else sys.argv[3]
def get_time(path):
result = subprocess.run(["master", "time", path], stdout = subprocess.PIPE)
return float(result.stdout.strip())
def get_errors(path, reference):
result = subprocess.run(["master", "errors", path, reference], stdout = subprocess.PIPE)
return tuple(map(float, result.stdout.strip().split()))
def get_technique(path):
TECHNIQUES = ["BPT", "VCM", "UPG", "PT"]
for technique in TECHNIQUES:
if technique in path:
return technique
return "N/A"
PT = []
BPT = []
VCM = []
UPG = []
images = [entry.path for entry in os.scandir(path) if entry.path.endswith(".exr")]
for image in images:
technique = get_technique(image)
entry = (get_time(image),) + get_errors(image, reference) + (get_technique(image), os.path.basename(image))
print(image)
if technique == "PT":
PT.append(entry)
elif technique == "BPT":
BPT.append(entry)
elif technique == "VCM":
VCM.append(entry)
elif technique == "UPG":
UPG.append(entry)
PT.sort(key = lambda x: x[0])
BPT.sort(key = lambda x: x[0])
VCM.sort(key = lambda x: x[0])
UPG.sort(key = lambda x: x[0])
def write_data(basename, technique, data):
filename, file_extension = os.path.splitext(basename)
file = open(filename + "." + technique + file_extension, "w+")
for entry in data:
file.write("{:16} {:16} {:16} {:6} # {}\n".format(*entry))
write_data(output, "pt", PT)
write_data(output, "bpt", BPT)
write_data(output, "vcm", VCM)
write_data(output, "upg", UPG)
|
Add Python script to make charts.#!/usr/bin/python3
import sys
import os
import subprocess
if len(sys.argv) < 3:
print("Provide reference image and output file.")
exit(1)
reference = sys.argv[1]
output = sys.argv[2]
path = os.getcwd() if len(sys.argv) == 3 else sys.argv[3]
def get_time(path):
result = subprocess.run(["master", "time", path], stdout = subprocess.PIPE)
return float(result.stdout.strip())
def get_errors(path, reference):
result = subprocess.run(["master", "errors", path, reference], stdout = subprocess.PIPE)
return tuple(map(float, result.stdout.strip().split()))
def get_technique(path):
TECHNIQUES = ["BPT", "VCM", "UPG", "PT"]
for technique in TECHNIQUES:
if technique in path:
return technique
return "N/A"
PT = []
BPT = []
VCM = []
UPG = []
images = [entry.path for entry in os.scandir(path) if entry.path.endswith(".exr")]
for image in images:
technique = get_technique(image)
entry = (get_time(image),) + get_errors(image, reference) + (get_technique(image), os.path.basename(image))
print(image)
if technique == "PT":
PT.append(entry)
elif technique == "BPT":
BPT.append(entry)
elif technique == "VCM":
VCM.append(entry)
elif technique == "UPG":
UPG.append(entry)
PT.sort(key = lambda x: x[0])
BPT.sort(key = lambda x: x[0])
VCM.sort(key = lambda x: x[0])
UPG.sort(key = lambda x: x[0])
def write_data(basename, technique, data):
filename, file_extension = os.path.splitext(basename)
file = open(filename + "." + technique + file_extension, "w+")
for entry in data:
file.write("{:16} {:16} {:16} {:6} # {}\n".format(*entry))
write_data(output, "pt", PT)
write_data(output, "bpt", BPT)
write_data(output, "vcm", VCM)
write_data(output, "upg", UPG)
|
<commit_before><commit_msg>Add Python script to make charts.<commit_after>#!/usr/bin/python3
import sys
import os
import subprocess
if len(sys.argv) < 3:
print("Provide reference image and output file.")
exit(1)
reference = sys.argv[1]
output = sys.argv[2]
path = os.getcwd() if len(sys.argv) == 3 else sys.argv[3]
def get_time(path):
result = subprocess.run(["master", "time", path], stdout = subprocess.PIPE)
return float(result.stdout.strip())
def get_errors(path, reference):
result = subprocess.run(["master", "errors", path, reference], stdout = subprocess.PIPE)
return tuple(map(float, result.stdout.strip().split()))
def get_technique(path):
TECHNIQUES = ["BPT", "VCM", "UPG", "PT"]
for technique in TECHNIQUES:
if technique in path:
return technique
return "N/A"
PT = []
BPT = []
VCM = []
UPG = []
images = [entry.path for entry in os.scandir(path) if entry.path.endswith(".exr")]
for image in images:
technique = get_technique(image)
entry = (get_time(image),) + get_errors(image, reference) + (get_technique(image), os.path.basename(image))
print(image)
if technique == "PT":
PT.append(entry)
elif technique == "BPT":
BPT.append(entry)
elif technique == "VCM":
VCM.append(entry)
elif technique == "UPG":
UPG.append(entry)
PT.sort(key = lambda x: x[0])
BPT.sort(key = lambda x: x[0])
VCM.sort(key = lambda x: x[0])
UPG.sort(key = lambda x: x[0])
def write_data(basename, technique, data):
filename, file_extension = os.path.splitext(basename)
file = open(filename + "." + technique + file_extension, "w+")
for entry in data:
file.write("{:16} {:16} {:16} {:6} # {}\n".format(*entry))
write_data(output, "pt", PT)
write_data(output, "bpt", BPT)
write_data(output, "vcm", VCM)
write_data(output, "upg", UPG)
|
|
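Each file written by write_data above holds whitespace-separated columns (render time, error metrics, technique) followed by a '# filename' comment. A minimal sketch for reading one back, with a hypothetical output file name and no assumption about how many error columns were written:
rows = []
with open('chart.upg.txt') as handle:              # hypothetical output of write_data
    for line in handle:
        data, _, name = line.partition('#')
        parts = data.split()
        time = float(parts[0])                     # render time in seconds
        technique = parts[-1]                      # BPT, VCM, UPG or PT
        errors = [float(v) for v in parts[1:-1]]   # however many error metrics were written
        rows.append((time, errors, technique, name.strip()))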
a1d2ec80dff63c7e208305207c697e5d3604a8f5
|
photutils/aperture/attributes.py
|
photutils/aperture/attributes.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Descriptor class(es) for aperture attribute validation.
"""
import weakref
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
class ApertureAttribute:
"""
Base descriptor class for aperture attribute validation.
"""
def __init__(self, name):
self.name = name
self.values = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
return self.values.get(instance, None)
def __set__(self, instance, value):
self._validate(value)
if isinstance(value, (u.Quantity, SkyCoord)):
self.values[instance] = value
else:
self.values[instance] = float(value)
def _validate(self, value):
"""
Validate the attribute value.
An exception is raised if the value is invalid.
"""
raise NotImplementedError
class Scalar(ApertureAttribute):
"""
Check that value is a scalar.
"""
def _validate(self, value):
if not np.isscalar(value):
raise ValueError(f'{self.name} must be a scalar')
class PositiveScalar(ApertureAttribute):
"""
Check that value is a strictly positive (> 0) scalar.
"""
def _validate(self, value):
if not np.isscalar(value) or value <= 0:
raise ValueError(f'{self.name} must be a positive scalar')
|
Add initial aperture descriptor classes
|
Add initial aperture descriptor classes
|
Python
|
bsd-3-clause
|
larrybradley/photutils,astropy/photutils
|
Add initial aperture descriptor classes
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Descriptor class(es) for aperture attribute validation.
"""
import weakref
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
class ApertureAttribute:
"""
Base descriptor class for aperture attribute validation.
"""
def __init__(self, name):
self.name = name
self.values = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
return self.values.get(instance, None)
def __set__(self, instance, value):
self._validate(value)
if isinstance(value, (u.Quantity, SkyCoord)):
self.values[instance] = value
else:
self.values[instance] = float(value)
def _validate(self, value):
"""
Validate the attribute value.
An exception is raised if the value is invalid.
"""
raise NotImplementedError
class Scalar(ApertureAttribute):
"""
Check that value is a scalar.
"""
def _validate(self, value):
if not np.isscalar(value):
raise ValueError(f'{self.name} must be a scalar')
class PositiveScalar(ApertureAttribute):
"""
Check that value is a strictly positive (> 0) scalar.
"""
def _validate(self, value):
if not np.isscalar(value) or value <= 0:
raise ValueError(f'{self.name} must be a positive scalar')
|
<commit_before><commit_msg>Add initial aperture descriptor classes<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Descriptor class(es) for aperture attribute validation.
"""
import weakref
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
class ApertureAttribute:
"""
Base descriptor class for aperture attribute validation.
"""
def __init__(self, name):
self.name = name
self.values = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
return self.values.get(instance, None)
def __set__(self, instance, value):
self._validate(value)
if isinstance(value, (u.Quantity, SkyCoord)):
self.values[instance] = value
else:
self.values[instance] = float(value)
def _validate(self, value):
"""
Validate the attribute value.
An exception is raised if the value is invalid.
"""
raise NotImplementedError
class Scalar(ApertureAttribute):
"""
Check that value is a scalar.
"""
def _validate(self, value):
if not np.isscalar(value):
raise ValueError(f'{self.name} must be a scalar')
class PositiveScalar(ApertureAttribute):
"""
Check that value is a strictly positive (> 0) scalar.
"""
def _validate(self, value):
if not np.isscalar(value) or value <= 0:
raise ValueError(f'{self.name} must be a positive scalar')
|
Add initial aperture descriptor classes# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Descriptor class(es) for aperture attribute validation.
"""
import weakref
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
class ApertureAttribute:
"""
Base descriptor class for aperture attribute validation.
"""
def __init__(self, name):
self.name = name
self.values = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
return self.values.get(instance, None)
def __set__(self, instance, value):
self._validate(value)
if isinstance(value, (u.Quantity, SkyCoord)):
self.values[instance] = value
else:
self.values[instance] = float(value)
def _validate(self, value):
"""
Validate the attribute value.
An exception is raised if the value is invalid.
"""
raise NotImplementedError
class Scalar(ApertureAttribute):
"""
Check that value is a scalar.
"""
def _validate(self, value):
if not np.isscalar(value):
raise ValueError(f'{self.name} must be a scalar')
class PositiveScalar(ApertureAttribute):
"""
Check that value is a strictly positive (> 0) scalar.
"""
def _validate(self, value):
if not np.isscalar(value) or value <= 0:
raise ValueError(f'{self.name} must be a positive scalar')
|
<commit_before><commit_msg>Add initial aperture descriptor classes<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Descriptor class(es) for aperture attribute validation.
"""
import weakref
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
class ApertureAttribute:
"""
Base descriptor class for aperture attribute validation.
"""
def __init__(self, name):
self.name = name
self.values = weakref.WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
return self.values.get(instance, None)
def __set__(self, instance, value):
self._validate(value)
if isinstance(value, (u.Quantity, SkyCoord)):
self.values[instance] = value
else:
self.values[instance] = float(value)
def _validate(self, value):
"""
Validate the attribute value.
An exception is raised if the value is invalid.
"""
raise NotImplementedError
class Scalar(ApertureAttribute):
"""
Check that value is a scalar.
"""
def _validate(self, value):
if not np.isscalar(value):
raise ValueError(f'{self.name} must be a scalar')
class PositiveScalar(ApertureAttribute):
"""
Check that value is a strictly positive (> 0) scalar.
"""
def _validate(self, value):
if not np.isscalar(value) or value <= 0:
raise ValueError(f'{self.name} must be a positive scalar')
|
|
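A short, hypothetical consumer class showing how the descriptors above behave: the value is validated on assignment and stored per instance (as a float for plain scalars). The ExampleAperture name is made up; Scalar and PositiveScalar are the classes defined in the record.
class ExampleAperture:
    radius = PositiveScalar('radius')   # must be > 0
    offset = Scalar('offset')           # any scalar
aper = ExampleAperture()
aper.radius = 3                          # accepted and stored as 3.0
aper.offset = -1.2                       # any scalar is fine here
try:
    aper.radius = -5                     # rejected by PositiveScalar._validate
except ValueError as exc:
    print(exc)                           # "radius must be a positive scalar"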
8d6c7623539a7109893c10aca0e116bf41b51634
|
web/migrations/versions/ad456cec28f4_add_user_name_column.py
|
web/migrations/versions/ad456cec28f4_add_user_name_column.py
|
"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
|
"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False, server_default=''))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
|
Add column default for User.name column
|
Add column default for User.name column
|
Python
|
mit
|
usgo/online-ratings,usgo/online-ratings,usgo/online-ratings,Kashomon/online-ratings,Kashomon/online-ratings,Kashomon/online-ratings
|
"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
Add column default for User.name column
|
"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False, server_default=''))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
|
<commit_before>"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
<commit_msg>Add column default for User.name column<commit_after>
|
"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False, server_default=''))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
|
"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
Add column default for User.name column"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False, server_default=''))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
|
<commit_before>"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
<commit_msg>Add column default for User.name column<commit_after>"""Add User.name column
Revision ID: ad456cec28f4
Revises: d767d9266a19
Create Date: 2016-11-21 03:03:08.967762
"""
# revision identifiers, used by Alembic.
revision = 'ad456cec28f4'
down_revision = 'd767d9266a19'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=False, server_default=''))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('myuser', 'name')
### end Alembic commands ###
|
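The server_default='' above lets the NOT NULL column be added in a single step even when myuser already contains rows. A hedged sketch of the more verbose alternative (add as nullable, backfill, then tighten the constraint) using standard Alembic operations:
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('myuser', sa.Column('name', sa.String(length=255), nullable=True))
    op.execute("UPDATE myuser SET name = ''")   # backfill existing rows
    op.alter_column('myuser', 'name', existing_type=sa.String(length=255), nullable=False)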
b07fcb6cc416c1b8c2a872e054069de218b7f3a5
|
accelerator/migrations/0077_update_sitetree_judging_commitment_view_url.py
|
accelerator/migrations/0077_update_sitetree_judging_commitment_view_url.py
|
# Generated by Django 2.2.24 on 2021-11-29 12:36
from django.db import migrations
def update_judging_commitment_view_url(apps, schema_editor):
NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
NavTreeItem.objects.filter(
url='/expert/commitments/').update(url='/judging/commitments/')
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_update_sitetree_panels_view_url'),
]
operations = [
migrations.RunPython(update_judging_commitment_view_url,
migrations.RunPython.noop)
]
|
Merge remote-tracking branch 'origin' into AC-9265
|
[AC-9265] Merge remote-tracking branch 'origin' into AC-9265
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-9265] Merge remote-tracking branch 'origin' into AC-9265
|
# Generated by Django 2.2.24 on 2021-11-29 12:36
from django.db import migrations
def update_judging_commitment_view_url(apps, schema_editor):
NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
NavTreeItem.objects.filter(
url='/expert/commitments/').update(url='/judging/commitments/')
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_update_sitetree_panels_view_url'),
]
operations = [
migrations.RunPython(update_judging_commitment_view_url,
migrations.RunPython.noop)
]
|
<commit_before><commit_msg>[AC-9265] Merge remote-tracking branch 'origin' into AC-9265<commit_after>
|
# Generated by Django 2.2.24 on 2021-11-29 12:36
from django.db import migrations
def update_judging_commitment_view_url(apps, schema_editor):
NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
NavTreeItem.objects.filter(
url='/expert/commitments/').update(url='/judging/commitments/')
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_update_sitetree_panels_view_url'),
]
operations = [
migrations.RunPython(update_judging_commitment_view_url,
migrations.RunPython.noop)
]
|
[AC-9265] Merge remote-tracking branch 'origin' into AC-9265# Generated by Django 2.2.24 on 2021-11-29 12:36
from django.db import migrations
def update_judging_commitment_view_url(apps, schema_editor):
NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
NavTreeItem.objects.filter(
url='/expert/commitments/').update(url='/judging/commitments/')
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_update_sitetree_panels_view_url'),
]
operations = [
migrations.RunPython(update_judging_commitment_view_url,
migrations.RunPython.noop)
]
|
<commit_before><commit_msg>[AC-9265] Merge remote-tracking branch 'origin' into AC-9265<commit_after># Generated by Django 2.2.24 on 2021-11-29 12:36
from django.db import migrations
def update_judging_commitment_view_url(apps, schema_editor):
NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
NavTreeItem.objects.filter(
url='/expert/commitments/').update(url='/judging/commitments/')
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0076_update_sitetree_panels_view_url'),
]
operations = [
migrations.RunPython(update_judging_commitment_view_url,
migrations.RunPython.noop)
]
|
|
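The migration above uses RunPython.noop for the reverse step. If a real rollback were wanted, a sketch of the inverse function (same model and URLs as in the record; apps and schema_editor are supplied by Django's migration framework) could look like this:
def revert_judging_commitment_view_url(apps, schema_editor):
    NavTreeItem = apps.get_model('accelerator', 'NavTreeItem')
    NavTreeItem.objects.filter(
        url='/judging/commitments/').update(url='/expert/commitments/')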
b0cafcf2b4ca61f5d2581c833851f9ac24968a98
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Util/Numpy.py
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Util/Numpy.py
|
import Axon
class TypeConverter(Axon.Component.component):
type = None
def main(self):
while 1:
if self.dataReady("inbox"):
data = self.recv("inbox")
if self.type != None:
self.send(data.astype(self.type), "outbox")
if not self.anyReady():
self.pause()
yield 1
|
Add component for converting between numpy types
|
Add component for converting between numpy types
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Add component for converting between numpy types
|
import Axon
class TypeConverter(Axon.Component.component):
type = None
def main(self):
while 1:
if self.dataReady("inbox"):
data = self.recv("inbox")
if self.type != None:
self.send(data.astype(self.type), "outbox")
if not self.anyReady():
self.pause()
yield 1
|
<commit_before><commit_msg>Add component for converting between numpy types<commit_after>
|
import Axon
class TypeConverter(Axon.Component.component):
type = None
def main(self):
while 1:
if self.dataReady("inbox"):
data = self.recv("inbox")
if self.type != None:
self.send(data.astype(self.type), "outbox")
if not self.anyReady():
self.pause()
yield 1
|
Add component for converting between numpy typesimport Axon
class TypeConverter(Axon.Component.component):
type = None
def main(self):
while 1:
if self.dataReady("inbox"):
data = self.recv("inbox")
if self.type != None:
self.send(data.astype(self.type), "outbox")
if not self.anyReady():
self.pause()
yield 1
|
<commit_before><commit_msg>Add component for converting between numpy types<commit_after>import Axon
class TypeConverter(Axon.Component.component):
type = None
def main(self):
while 1:
if self.dataReady("inbox"):
data = self.recv("inbox")
if self.type != None:
self.send(data.astype(self.type), "outbox")
if not self.anyReady():
self.pause()
yield 1
|
|
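The TypeConverter component above simply applies numpy's astype to each message it receives and forwards the result. Outside the Kamaelia plumbing, that per-message operation is just:
import numpy
data = numpy.array([0.1, 0.7, 1.3])
converted = data.astype(numpy.float32)   # what the component would send when its `type` is numpy.float32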
3e8a16b270c28fb955e1ab3e38da0aae2d0d6128
|
src/tmlib/workflow/metaconfig/niselements.py
|
src/tmlib/workflow/metaconfig/niselements.py
|
'''Implementation of classes for reading microscope image and metadata files
provided in a format specific to microscopes equipped
with
`Nikon NISElements <https://www.nikoninstruments.com/Products/Software>`_
software.
'''
import os
import re
import logging
import bioformats
from collections import defaultdict
from tmlib import utils
from tmlib.workflow.metaconfig.base import MetadataReader
from tmlib.workflow.metaconfig.base import MetadataHandler
from tmlib.workflow.metaconfig.omexml import XML_DECLARATION
logger = logging.getLogger(__name__)
#: Regular expression pattern to identify image files
# TODO: how are time points encoded?
IMAGE_FILE_REGEX_PATTERN = '.+_t(?P<t>\d+)_(?P<w>[A-Z]\d+)_(?P<s>\d+)-(?P<c>\d+)\.nd2'
#: Supported extensions for metadata files
METADATA_FILE_REGEX_PATTERN = r'(?!.*)'
class NISElementsMetadataHandler(MetadataHandler):
'''Class for handling metadata specific to microscopes equipped with
NISElements software.
'''
#: Regular expression pattern to identify image files
IMAGE_FILE_REGEX_PATTERN = IMAGE_FILE_REGEX_PATTERN
def __init__(self, omexml_images, omexml_metadata=None):
'''
Parameters
----------
omexml_images: Dict[str, bioformats.omexml.OMEXML]
metadata extracted from microscope image files
omexml_metadata: bioformats.omexml.OMEXML
metadata extracted from microscope metadata files
'''
super(NISElementsMetadataHandler, self).__init__(
omexml_images, omexml_metadata
)
class NISElementsMetadataReader(MetadataReader):
'''Class for reading metadata from file formats specific to microscopes
equipped with NISElements software.
Note
----
The microscope doesn't provide any metadata files.
'''
def read(self, microscope_metadata_files, microscope_image_files):
'''Read metadata from "nd" metadata file.
Parameters
----------
microscope_metadata_files: List[str]
absolute path to the microscope metadata files
microscope_image_files: List[str]
absolute path to the microscope image files
Returns
-------
bioformats.omexml.OMEXML
OMEXML image metadata
'''
return bioformats.OMEXML(XML_DECLARATION)
|
Implement new microscope type for nikon NISElements
|
Implement new microscope type for nikon NISElements
|
Python
|
agpl-3.0
|
TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary,TissueMAPS/TmLibrary
|
Implement new microscope type for nikon NISElements
|
'''Implementation of classes for reading microscope image and metadata files
provided in a format specific to microscopes equipped
with
`Nikon NISElements <https://www.nikoninstruments.com/Products/Software>`_
software.
'''
import os
import re
import logging
import bioformats
from collections import defaultdict
from tmlib import utils
from tmlib.workflow.metaconfig.base import MetadataReader
from tmlib.workflow.metaconfig.base import MetadataHandler
from tmlib.workflow.metaconfig.omexml import XML_DECLARATION
logger = logging.getLogger(__name__)
#: Regular expression pattern to identify image files
# TODO: how are time points encoded?
IMAGE_FILE_REGEX_PATTERN = '.+_t(?P<t>\d+)_(?P<w>[A-Z]\d+)_(?P<s>\d+)-(?P<c>\d+)\.nd2'
#: Supported extensions for metadata files
METADATA_FILE_REGEX_PATTERN = r'(?!.*)'
class NISElementsMetadataHandler(MetadataHandler):
'''Class for handling metadata specific to microscopes equipped with
NISElements software.
'''
#: Regular expression pattern to identify image files
IMAGE_FILE_REGEX_PATTERN = IMAGE_FILE_REGEX_PATTERN
def __init__(self, omexml_images, omexml_metadata=None):
'''
Parameters
----------
omexml_images: Dict[str, bioformats.omexml.OMEXML]
metadata extracted from microscope image files
omexml_metadata: bioformats.omexml.OMEXML
metadata extracted from microscope metadata files
'''
super(NISElementsMetadataHandler, self).__init__(
omexml_images, omexml_metadata
)
class NISElementsMetadataReader(MetadataReader):
'''Class for reading metadata from file formats specific to microscopes
equipped with NISElements software.
Note
----
The microscope doesn't provide any metadata files.
'''
def read(self, microscope_metadata_files, microscope_image_files):
'''Read metadata from "nd" metadata file.
Parameters
----------
microscope_metadata_files: List[str]
absolute path to the microscope metadata files
microscope_image_files: List[str]
absolute path to the microscope image files
Returns
-------
bioformats.omexml.OMEXML
OMEXML image metadata
'''
return bioformats.OMEXML(XML_DECLARATION)
|
<commit_before><commit_msg>Implement new microscope type for nikon NISElements<commit_after>
|
'''Implementation of classes for reading microscope image and metadata files
provided in a format specific to microscopes equipped
with
`Nikon NISElements <https://www.nikoninstruments.com/Products/Software>`_
software.
'''
import os
import re
import logging
import bioformats
from collections import defaultdict
from tmlib import utils
from tmlib.workflow.metaconfig.base import MetadataReader
from tmlib.workflow.metaconfig.base import MetadataHandler
from tmlib.workflow.metaconfig.omexml import XML_DECLARATION
logger = logging.getLogger(__name__)
#: Regular expression pattern to identify image files
# TODO: how are time points encoded?
IMAGE_FILE_REGEX_PATTERN = '.+_t(?P<t>\d+)_(?P<w>[A-Z]\d+)_(?P<s>\d+)-(?P<c>\d+)\.nd2'
#: Supported extensions for metadata files
METADATA_FILE_REGEX_PATTERN = r'(?!.*)'
class NISElementsMetadataHandler(MetadataHandler):
'''Class for handling metadata specific to microscopes equipped with
NISElements software.
'''
#: Regular expression pattern to identify image files
IMAGE_FILE_REGEX_PATTERN = IMAGE_FILE_REGEX_PATTERN
def __init__(self, omexml_images, omexml_metadata=None):
'''
Parameters
----------
omexml_images: Dict[str, bioformats.omexml.OMEXML]
metadata extracted from microscope image files
omexml_metadata: bioformats.omexml.OMEXML
metadata extracted from microscope metadata files
'''
super(NISElementsMetadataHandler, self).__init__(
omexml_images, omexml_metadata
)
class NISElementsMetadataReader(MetadataReader):
'''Class for reading metadata from file formats specific to microscopes
equipped with NISElements software.
Note
----
The microscope doesn't provide any metadata files.
'''
def read(self, microscope_metadata_files, microscope_image_files):
'''Read metadata from "nd" metadata file.
Parameters
----------
microscope_metadata_files: List[str]
absolute path to the microscope metadata files
microscope_image_files: List[str]
absolute path to the microscope image files
Returns
-------
bioformats.omexml.OMEXML
OMEXML image metadata
'''
return bioformats.OMEXML(XML_DECLARATION)
|
Implement new microscope type for nikon NISElements'''Implementation of classes for reading microscope image and metadata files
provided in a format specific to microscopes equipped
with
`Nikon NISElements <https://www.nikoninstruments.com/Products/Software>`_
software.
'''
import os
import re
import logging
import bioformats
from collections import defaultdict
from tmlib import utils
from tmlib.workflow.metaconfig.base import MetadataReader
from tmlib.workflow.metaconfig.base import MetadataHandler
from tmlib.workflow.metaconfig.omexml import XML_DECLARATION
logger = logging.getLogger(__name__)
#: Regular expression pattern to identify image files
# TODO: how are time points encoded?
IMAGE_FILE_REGEX_PATTERN = '.+_t(?P<t>\d+)_(?P<w>[A-Z]\d+)_(?P<s>\d+)-(?P<c>\d+)\.nd2'
#: Supported extensions for metadata files
METADATA_FILE_REGEX_PATTERN = r'(?!.*)'
class NISElementsMetadataHandler(MetadataHandler):
'''Class for handling metadata specific to microscopes equipped with
NISElements software.
'''
#: Regular expression pattern to identify image files
IMAGE_FILE_REGEX_PATTERN = IMAGE_FILE_REGEX_PATTERN
def __init__(self, omexml_images, omexml_metadata=None):
'''
Parameters
----------
omexml_images: Dict[str, bioformats.omexml.OMEXML]
metadata extracted from microscope image files
omexml_metadata: bioformats.omexml.OMEXML
metadata extracted from microscope metadata files
'''
super(NISElementsMetadataHandler, self).__init__(
omexml_images, omexml_metadata
)
class NISElementsMetadataReader(MetadataReader):
'''Class for reading metadata from file formats specific to microscopes
equipped with NISElements software.
Note
----
The microscope doesn't provide any metadata files.
'''
def read(self, microscope_metadata_files, microscope_image_files):
'''Read metadata from "nd" metadata file.
Parameters
----------
microscope_metadata_files: List[str]
absolute path to the microscope metadata files
microscope_image_files: List[str]
absolute path to the microscope image files
Returns
-------
bioformats.omexml.OMEXML
OMEXML image metadata
'''
return bioformats.OMEXML(XML_DECLARATION)
|
<commit_before><commit_msg>Implement new microscope type for nikon NISElements<commit_after>'''Implementation of classes for reading microscope image and metadata files
provided in a format specific to microscopes equipped
with
`Nikon NISElements <https://www.nikoninstruments.com/Products/Software>`_
software.
'''
import os
import re
import logging
import bioformats
from collections import defaultdict
from tmlib import utils
from tmlib.workflow.metaconfig.base import MetadataReader
from tmlib.workflow.metaconfig.base import MetadataHandler
from tmlib.workflow.metaconfig.omexml import XML_DECLARATION
logger = logging.getLogger(__name__)
#: Regular expression pattern to identify image files
# TODO: how are time points encoded?
IMAGE_FILE_REGEX_PATTERN = '.+_t(?P<t>\d+)_(?P<w>[A-Z]\d+)_(?P<s>\d+)-(?P<c>\d+)\.nd2'
#: Supported extensions for metadata files
METADATA_FILE_REGEX_PATTERN = r'(?!.*)'
class NISElementsMetadataHandler(MetadataHandler):
'''Class for handling metadata specific to microscopes equipped with
NISElements software.
'''
#: Regular expression pattern to identify image files
IMAGE_FILE_REGEX_PATTERN = IMAGE_FILE_REGEX_PATTERN
def __init__(self, omexml_images, omexml_metadata=None):
'''
Parameters
----------
omexml_images: Dict[str, bioformats.omexml.OMEXML]
metadata extracted from microscope image files
omexml_metadata: bioformats.omexml.OMEXML
metadata extracted from microscope metadata files
'''
super(NISElementsMetadataHandler, self).__init__(
omexml_images, omexml_metadata
)
class NISElementsMetadataReader(MetadataReader):
'''Class for reading metadata from file formats specific to microscopes
equipped with NISElements software.
Note
----
The microscope doesn't provide any metadata files.
'''
def read(self, microscope_metadata_files, microscope_image_files):
'''Read metadata from "nd" metadata file.
Parameters
----------
microscope_metadata_files: List[str]
absolute path to the microscope metadata files
microscope_image_files: List[str]
absolute path to the microscope image files
Returns
-------
bioformats.omexml.OMEXML
OMEXML image metadata
'''
return bioformats.OMEXML(XML_DECLARATION)
|
|
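A quick check of the IMAGE_FILE_REGEX_PATTERN defined above against a made-up NISElements file name, showing which named groups (time point, well, site, channel) it extracts; the file name is purely illustrative.
import re
pattern = r'.+_t(?P<t>\d+)_(?P<w>[A-Z]\d+)_(?P<s>\d+)-(?P<c>\d+)\.nd2'
match = re.search(pattern, 'experiment1_t001_D05_3-2.nd2')   # hypothetical file name
print(match.groupdict())   # {'t': '001', 'w': 'D05', 's': '3', 'c': '2'}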
95009e638dfa5a20a7277a326607ba7090162fc0
|
seleniumbase/common/obfuscate.py
|
seleniumbase/common/obfuscate.py
|
"""
Obfuscates a string/password into a string that can be decrypted later on.
Usage:
python obfuscate.py
Then enter the password.
The result is an encrypted password.
"""
from seleniumbase.common import encryption
import getpass
import time
def main():
try:
while(1):
print("\nEnter password to obfuscate: (CTRL-C to exit)")
password = getpass.getpass()
print("Verify password:")
verify_password = getpass.getpass()
if password != verify_password:
print("*** ERROR: Passwords don't match! .. Please try again!")
continue
print("\nHere is the obfuscated password:")
time.sleep(0.07)
print(encryption.decrypt(password))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
Add the user interface for string/password obfuscation
|
Add the user interface for string/password obfuscation
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase
|
Add the user interface for string/password obfuscation
|
"""
Obfuscates a string/password into a string that can be decrypted later on.
Usage:
python obfuscate.py
Then enter the password.
The result is an encrypted password.
"""
from seleniumbase.common import encryption
import getpass
import time
def main():
try:
while(1):
print("\nEnter password to obfuscate: (CTRL-C to exit)")
password = getpass.getpass()
print("Verify password:")
verify_password = getpass.getpass()
if password != verify_password:
print("*** ERROR: Passwords don't match! .. Please try again!")
continue
print("\nHere is the obfuscated password:")
time.sleep(0.07)
print(encryption.decrypt(password))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the user interface for string/password obfuscation<commit_after>
|
"""
Obfuscates a string/password into a string that can be decrypted later on.
Usage:
python obfuscate.py
Then enter the password.
The result is an encrypted password.
"""
from seleniumbase.common import encryption
import getpass
import time
def main():
try:
while(1):
print("\nEnter password to obfuscate: (CTRL-C to exit)")
password = getpass.getpass()
print("Verify password:")
verify_password = getpass.getpass()
if password != verify_password:
print("*** ERROR: Passwords don't match! .. Please try again!")
continue
print("\nHere is the obfuscated password:")
time.sleep(0.07)
print(encryption.decrypt(password))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
Add the user interface for string/password obfuscation"""
Obfuscates a string/password into a string that can be decrypted later on.
Usage:
python obfuscate.py
Then enter the password.
The result is an encrypted password.
"""
from seleniumbase.common import encryption
import getpass
import time
def main():
try:
while(1):
print("\nEnter password to obfuscate: (CTRL-C to exit)")
password = getpass.getpass()
print("Verify password:")
verify_password = getpass.getpass()
if password != verify_password:
print("*** ERROR: Passwords don't match! .. Please try again!")
continue
print("\nHere is the obfuscated password:")
time.sleep(0.07)
print(encryption.decrypt(password))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the user interface for string/password obfuscation<commit_after>"""
Obfuscates a string/password into a string that can be decrypted later on.
Usage:
python obfuscate.py
Then enter the password.
The result is an encrypted password.
"""
from seleniumbase.common import encryption
import getpass
import time
def main():
try:
while(1):
print("\nEnter password to obfuscate: (CTRL-C to exit)")
password = getpass.getpass()
print("Verify password:")
verify_password = getpass.getpass()
if password != verify_password:
print("*** ERROR: Passwords don't match! .. Please try again!")
continue
print("\nHere is the obfuscated password:")
time.sleep(0.07)
print(encryption.decrypt(password))
time.sleep(0.21)
except:
print("\nExiting...\n")
if __name__ == "__main__":
main()
|
|
4cc7d842afbb9ca2f324886de1974e7dacb600dd
|
dlexperiment.py
|
dlexperiment.py
|
class Experiment(object):
def __init__(self, epochs=1):
self.epochs = epochs
def get_epochs(self):
return self.epochs
def train(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def set_loss(self):
raise NotImplementedError
def checkpoint(self):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
def is_done(self):
raise NotImplementedError
class PyTorchExperiment(object):
def save(self):
pass
|
Add outline parent class for defining experiments
|
Add outline parent class for defining experiments
|
Python
|
apache-2.0
|
sagelywizard/dlex
|
Add outline parent class for defining experiments
|
class Experiment(object):
def __init__(self, epochs=1):
self.epochs = epochs
def get_epochs(self):
return self.epochs
def train(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def set_loss(self):
raise NotImplementedError
def checkpoint(self):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
def is_done(self):
raise NotImplementedError
class PyTorchExperiment(object):
def save(self):
pass
|
<commit_before><commit_msg>Add outline parent class for defining experiments<commit_after>
|
class Experiment(object):
def __init__(self, epochs=1):
self.epochs = epochs
def get_epochs(self):
return self.epochs
def train(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def set_loss(self):
raise NotImplementedError
def checkpoint(self):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
def is_done(self):
raise NotImplementedError
class PyTorchExperiment(object):
def save(self):
pass
|
Add outline parent class for defining experimentsclass Experiment(object):
def __init__(self, epochs=1):
self.epochs = epochs
def get_epochs(self):
return self.epochs
def train(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def set_loss(self):
raise NotImplementedError
def checkpoint(self):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
def is_done(self):
raise NotImplementedError
class PyTorchExperiment(object):
def save(self):
pass
|
<commit_before><commit_msg>Add outline parent class for defining experiments<commit_after>class Experiment(object):
def __init__(self, epochs=1):
self.epochs = epochs
def get_epochs(self):
return self.epochs
def train(self):
raise NotImplementedError
def test(self):
raise NotImplementedError
def set_loss(self):
raise NotImplementedError
def checkpoint(self):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load(self):
raise NotImplementedError
def is_done(self):
raise NotImplementedError
class PyTorchExperiment(object):
def save(self):
pass
|
|
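A hypothetical minimal subclass showing how the abstract hooks above are meant to be filled in; the actual training logic is omitted and the Experiment base class from the record is assumed to be in scope.
class CountingExperiment(Experiment):
    def __init__(self, epochs=3):
        super(CountingExperiment, self).__init__(epochs=epochs)
        self.completed = 0
    def train(self):
        self.completed += 1          # stands in for one pass over the (omitted) training data
    def is_done(self):
        return self.completed >= self.get_epochs()
exp = CountingExperiment()
while not exp.is_done():
    exp.train()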
143500bc7064b05d0ce4a132cce1f48ddb1ecee7
|
rwt/tests/test_scripts.py
|
rwt/tests/test_scripts.py
|
import textwrap
import sys
import subprocess
def test_pkg_imported(tmpdir):
"""
Create a script that loads cython and ensure it runs.
"""
body = textwrap.dedent("""
import cython
print("Successfully imported cython")
""").lstrip()
script_file = tmpdir / 'script'
script_file.write_text(body, 'utf-8')
pip_args = ['cython']
cmd = [sys.executable, '-m', 'rwt'] + pip_args + ['--', str(script_file)]
out = subprocess.check_output(cmd, universal_newlines=True)
assert 'Successfully imported cython' in out
|
Add test for basic functionality
|
Add test for basic functionality
|
Python
|
mit
|
jaraco/rwt
|
Add test for basic functionality
|
import textwrap
import sys
import subprocess
def test_pkg_imported(tmpdir):
"""
Create a script that loads cython and ensure it runs.
"""
body = textwrap.dedent("""
import cython
print("Successfully imported cython")
""").lstrip()
script_file = tmpdir / 'script'
script_file.write_text(body, 'utf-8')
pip_args = ['cython']
cmd = [sys.executable, '-m', 'rwt'] + pip_args + ['--', str(script_file)]
out = subprocess.check_output(cmd, universal_newlines=True)
assert 'Successfully imported cython' in out
|
<commit_before><commit_msg>Add test for basic functionality<commit_after>
|
import textwrap
import sys
import subprocess
def test_pkg_imported(tmpdir):
"""
Create a script that loads cython and ensure it runs.
"""
body = textwrap.dedent("""
import cython
print("Successfully imported cython")
""").lstrip()
script_file = tmpdir / 'script'
script_file.write_text(body, 'utf-8')
pip_args = ['cython']
cmd = [sys.executable, '-m', 'rwt'] + pip_args + ['--', str(script_file)]
out = subprocess.check_output(cmd, universal_newlines=True)
assert 'Successfully imported cython' in out
|
Add test for basic functionalityimport textwrap
import sys
import subprocess
def test_pkg_imported(tmpdir):
"""
Create a script that loads cython and ensure it runs.
"""
body = textwrap.dedent("""
import cython
print("Successfully imported cython")
""").lstrip()
script_file = tmpdir / 'script'
script_file.write_text(body, 'utf-8')
pip_args = ['cython']
cmd = [sys.executable, '-m', 'rwt'] + pip_args + ['--', str(script_file)]
out = subprocess.check_output(cmd, universal_newlines=True)
assert 'Successfully imported cython' in out
|
<commit_before><commit_msg>Add test for basic functionality<commit_after>import textwrap
import sys
import subprocess
def test_pkg_imported(tmpdir):
"""
Create a script that loads cython and ensure it runs.
"""
body = textwrap.dedent("""
import cython
print("Successfully imported cython")
""").lstrip()
script_file = tmpdir / 'script'
script_file.write_text(body, 'utf-8')
pip_args = ['cython']
cmd = [sys.executable, '-m', 'rwt'] + pip_args + ['--', str(script_file)]
out = subprocess.check_output(cmd, universal_newlines=True)
assert 'Successfully imported cython' in out
|
|
83863814795bf2fc6ddf12c1a45aa531afb1662c
|
tests/backends/events_test.py
|
tests/backends/events_test.py
|
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
Test that backend actually sends the events
|
Test that backend actually sends the events
|
Python
|
apache-2.0
|
kingosticks/mopidy,diandiankan/mopidy,glogiotatidis/mopidy,vrs01/mopidy,dbrgn/mopidy,liamw9534/mopidy,SuperStarPL/mopidy,quartz55/mopidy,ZenithDK/mopidy,diandiankan/mopidy,ali/mopidy,kingosticks/mopidy,vrs01/mopidy,pacificIT/mopidy,ali/mopidy,diandiankan/mopidy,rawdlite/mopidy,vrs01/mopidy,bencevans/mopidy,tkem/mopidy,pacificIT/mopidy,swak/mopidy,pacificIT/mopidy,woutervanwijk/mopidy,tkem/mopidy,hkariti/mopidy,abarisain/mopidy,jcass77/mopidy,glogiotatidis/mopidy,jcass77/mopidy,dbrgn/mopidy,ZenithDK/mopidy,quartz55/mopidy,abarisain/mopidy,rawdlite/mopidy,rawdlite/mopidy,adamcik/mopidy,priestd09/mopidy,jcass77/mopidy,tkem/mopidy,mokieyue/mopidy,glogiotatidis/mopidy,SuperStarPL/mopidy,diandiankan/mopidy,ZenithDK/mopidy,swak/mopidy,tkem/mopidy,bencevans/mopidy,jmarsik/mopidy,bencevans/mopidy,SuperStarPL/mopidy,swak/mopidy,pacificIT/mopidy,priestd09/mopidy,bacontext/mopidy,quartz55/mopidy,adamcik/mopidy,mokieyue/mopidy,hkariti/mopidy,glogiotatidis/mopidy,mokieyue/mopidy,ZenithDK/mopidy,ali/mopidy,mokieyue/mopidy,hkariti/mopidy,jodal/mopidy,jmarsik/mopidy,priestd09/mopidy,liamw9534/mopidy,bencevans/mopidy,SuperStarPL/mopidy,swak/mopidy,adamcik/mopidy,jmarsik/mopidy,bacontext/mopidy,kingosticks/mopidy,woutervanwijk/mopidy,dbrgn/mopidy,hkariti/mopidy,jodal/mopidy,vrs01/mopidy,mopidy/mopidy,jodal/mopidy,ali/mopidy,quartz55/mopidy,rawdlite/mopidy,bacontext/mopidy,mopidy/mopidy,bacontext/mopidy,dbrgn/mopidy,mopidy/mopidy,jmarsik/mopidy
|
Test that backend actually sends the events
|
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
<commit_before><commit_msg>Test that backend actually sends the events<commit_after>
|
import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
Test that backend actually sends the eventsimport threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
<commit_before><commit_msg>Test that backend actually sends the events<commit_after>import threading
import unittest
from pykka.actor import ThreadingActor
from pykka.registry import ActorRegistry
from mopidy.backends.dummy import DummyBackend
from mopidy.listeners import BackendListener
from mopidy.models import Track
class BackendEventsTest(unittest.TestCase):
def setUp(self):
self.events = {
'started_playing': threading.Event(),
'stopped_playing': threading.Event(),
}
self.backend = DummyBackend.start().proxy()
self.listener = DummyBackendListener.start(self.events).proxy()
def tearDown(self):
ActorRegistry.stop_all()
def test_play_sends_started_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.events['started_playing'].wait(timeout=1)
self.assertTrue(self.events['started_playing'].is_set())
def test_stop_sends_stopped_playing_event(self):
self.backend.current_playlist.add([Track(uri='a')])
self.backend.playback.play()
self.backend.playback.stop()
self.events['stopped_playing'].wait(timeout=1)
self.assertTrue(self.events['stopped_playing'].is_set())
class DummyBackendListener(ThreadingActor, BackendListener):
def __init__(self, events):
self.events = events
def started_playing(self, track):
self.events['started_playing'].set()
def stopped_playing(self, track, stop_position):
self.events['stopped_playing'].set()
|
|
48c5e7ce3029ab67c755b6fa0c08412525bd0a0a
|
mapApp/utils/weather4all.py
|
mapApp/utils/weather4all.py
|
from mapApp.models import Incident, Weather
from mapApp.utils.weather import get_weather
import threading
import time
maxconnections = 5
semaphore = threading.Semaphore(maxconnections)
def run():
""" Create Weather instances for all Incidents in the application database if they do not already exist
"""
start_t = time.time()
threads = []
processed = 0
for incident in Incident.objects.all():
if hasattr(incident, 'weather'):
continue
else:
# Create a new Weather instance using a non-blocking thread
processed += 1
thread = WeatherThread(incident)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
end_t = time.time()
print processed, "Incidents processed in", end_t - start_t, "s"
class WeatherThread(threading.Thread):
def __init__(self, incident):
self.incident = incident
super(WeatherThread, self).__init__()
def run(self):
data = get_weather(self.incident.geom, self.incident.date)
semaphore.acquire()
Weather(
incident = self.incident,
temperature_c = data['temperatureC'],
visibility_km = data['visibilityKM'],
windspeed_kmh = data['windSpeedKMH'],
precip_mmh = data['precipMMH'],
precip_prob = data['precipProb'],
sunrise_time = data['sunriseTime'],
sunset_time = data['sunsetTime'],
dawn = data['dawn'],
dusk = data['dusk'],
wind_dir_deg = data['windDirDeg'],
wind_dir_str = data['windDirStr'],
black_ice_risk = data['blackIceRisk'],
summary = data['summary']
).save()
semaphore.release()
|
Write multithreaded script to get all weather for existing incidents in the database
|
Write multithreaded script to get all weather for existing incidents in the database
|
Python
|
mit
|
SPARLab/BikeMaps,SPARLab/BikeMaps,SPARLab/BikeMaps
|
Write multithreaded script to get all weather for existing incidents in the database
|
from mapApp.models import Incident, Weather
from mapApp.utils.weather import get_weather
import threading
import time
maxconnections = 5
semaphore = threading.Semaphore(maxconnections)
def run():
""" Create Weather instances for all Incidents in the application database if they do not already exist
"""
start_t = time.time()
threads = []
processed = 0
for incident in Incident.objects.all():
if hasattr(incident, 'weather'):
continue
else:
# Create a new Weather instance using a non-blocking thread
processed += 1
thread = WeatherThread(incident)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
end_t = time.time()
print processed, "Incidents processed in", end_t - start_t, "s"
class WeatherThread(threading.Thread):
def __init__(self, incident):
self.incident = incident
super(WeatherThread, self).__init__()
def run(self):
data = get_weather(self.incident.geom, self.incident.date)
semaphore.acquire()
Weather(
incident = self.incident,
temperature_c = data['temperatureC'],
visibility_km = data['visibilityKM'],
windspeed_kmh = data['windSpeedKMH'],
precip_mmh = data['precipMMH'],
precip_prob = data['precipProb'],
sunrise_time = data['sunriseTime'],
sunset_time = data['sunsetTime'],
dawn = data['dawn'],
dusk = data['dusk'],
wind_dir_deg = data['windDirDeg'],
wind_dir_str = data['windDirStr'],
black_ice_risk = data['blackIceRisk'],
summary = data['summary']
).save()
semaphore.release()
|
<commit_before><commit_msg>Write multithreaded script to get all weather for existing incidents in the database<commit_after>
|
from mapApp.models import Incident, Weather
from mapApp.utils.weather import get_weather
import threading
import time
maxconnections = 5
semaphore = threading.Semaphore(maxconnections)
def run():
""" Create Weather instances for all Incidents in the application database if they do not already exist
"""
start_t = time.time()
threads = []
processed = 0
for incident in Incident.objects.all():
if hasattr(incident, 'weather'):
continue
else:
# Create a new Weather instance using a non-blocking thread
processed += 1
thread = WeatherThread(incident)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
end_t = time.time()
print processed, "Incidents processed in", end_t - start_t, "s"
class WeatherThread(threading.Thread):
def __init__(self, incident):
self.incident = incident
super(WeatherThread, self).__init__()
def run(self):
data = get_weather(self.incident.geom, self.incident.date)
semaphore.acquire()
Weather(
incident = self.incident,
temperature_c = data['temperatureC'],
visibility_km = data['visibilityKM'],
windspeed_kmh = data['windSpeedKMH'],
precip_mmh = data['precipMMH'],
precip_prob = data['precipProb'],
sunrise_time = data['sunriseTime'],
sunset_time = data['sunsetTime'],
dawn = data['dawn'],
dusk = data['dusk'],
wind_dir_deg = data['windDirDeg'],
wind_dir_str = data['windDirStr'],
black_ice_risk = data['blackIceRisk'],
summary = data['summary']
).save()
semaphore.release()
|
Write multithreaded script to get all weather for existing incidents in the databasefrom mapApp.models import Incident, Weather
from mapApp.utils.weather import get_weather
import threading
import time
maxconnections = 5
semaphore = threading.Semaphore(maxconnections)
def run():
""" Create Weather instances for all Incidents in the application database if they do not already exist
"""
start_t = time.time()
threads = []
processed = 0
for incident in Incident.objects.all():
if hasattr(incident, 'weather'):
continue
else:
# Create a new Weather instance using a non-blocking thread
processed += 1
thread = WeatherThread(incident)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
end_t = time.time()
print processed, "Incidents processed in", end_t - start_t, "s"
class WeatherThread(threading.Thread):
def __init__(self, incident):
self.incident = incident
super(WeatherThread, self).__init__()
def run(self):
data = get_weather(self.incident.geom, self.incident.date)
semaphore.acquire()
Weather(
incident = self.incident,
temperature_c = data['temperatureC'],
visibility_km = data['visibilityKM'],
windspeed_kmh = data['windSpeedKMH'],
precip_mmh = data['precipMMH'],
precip_prob = data['precipProb'],
sunrise_time = data['sunriseTime'],
sunset_time = data['sunsetTime'],
dawn = data['dawn'],
dusk = data['dusk'],
wind_dir_deg = data['windDirDeg'],
wind_dir_str = data['windDirStr'],
black_ice_risk = data['blackIceRisk'],
summary = data['summary']
).save()
semaphore.release()
|
<commit_before><commit_msg>Write multithreaded script to get all weather for existing incidents in the database<commit_after>from mapApp.models import Incident, Weather
from mapApp.utils.weather import get_weather
import threading
import time
maxconnections = 5
semaphore = threading.Semaphore(maxconnections)
def run():
""" Create Weather instances for all Incidents in the application database if they do not already exist
"""
start_t = time.time()
threads = []
processed = 0
for incident in Incident.objects.all():
if hasattr(incident, 'weather'):
continue
else:
# Create a new Weather instance using a non-blocking thread
processed += 1
thread = WeatherThread(incident)
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
end_t = time.time()
print processed, "Incidents processed in", end_t - start_t, "s"
class WeatherThread(threading.Thread):
def __init__(self, incident):
self.incident = incident
super(WeatherThread, self).__init__()
def run(self):
data = get_weather(self.incident.geom, self.incident.date)
semaphore.acquire()
Weather(
incident = self.incident,
temperature_c = data['temperatureC'],
visibility_km = data['visibilityKM'],
windspeed_kmh = data['windSpeedKMH'],
precip_mmh = data['precipMMH'],
precip_prob = data['precipProb'],
sunrise_time = data['sunriseTime'],
sunset_time = data['sunsetTime'],
dawn = data['dawn'],
dusk = data['dusk'],
wind_dir_deg = data['windDirDeg'],
wind_dir_str = data['windDirStr'],
black_ice_risk = data['blackIceRisk'],
summary = data['summary']
).save()
semaphore.release()
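
Note on the WeatherThread above: get_weather() runs before the semaphore is acquired, so maxconnections = 5 bounds only the concurrent Weather(...).save() calls, not the number of weather requests in flight. If the goal is to throttle the remote API, a minimal sketch of the alternative ordering (not part of this commit; threading.Semaphore supports the with statement, and _save() is a hypothetical helper standing in for the Weather(...).save() block shown above):

    def run(self):
        with semaphore:
            # Held around the network call, so at most `maxconnections`
            # weather requests are in flight at the same time.
            data = get_weather(self.incident.geom, self.incident.date)
        self._save(data)  # hypothetical helper wrapping the Weather(...).save() block above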
|
|
c5631d19baecb2884d4dbc98ecaa333c615efbab
|
fabix/aws/s3.py
|
fabix/aws/s3.py
|
import os
import boto
from boto.utils import compute_md5
from fabric.decorators import task
from fabric.utils import puts
def upload_file(bucket, key_name, file_path, policy='public-read'):
key = bucket.new_key(key_name)
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
key.set_metadata('fabix-md5', md5[0])
key.set_contents_from_filename(file_path, md5=md5, policy=policy)
return key
def get_key_name(local_path, fullpath):
key_name = fullpath[len(local_path):]
l = key_name.split(os.sep)
key_name = '/'.join(l)
return key_name.lstrip('/')
@task
def sync_dir_up(bucket_name, local_path):
puts("Sync directory {0} with bucket {1}".format(bucket_name, local_path))
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
for root, dirs, files in os.walk(local_path):
for fname in files:
file_path = os.path.join(root, fname)
key_name = get_key_name(local_path, file_path)
key = bucket.lookup(key_name)
if key:
key_md5 = key.get_metadata('fabix-md5')
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
if md5[0] == key_md5:
puts("Skipping {0} (MD5 match)".format(file_path))
continue
puts("Upload file {0}".format(file_path))
upload_file(bucket, key_name, file_path)
|
Add task to upload a dir to S3
|
Add task to upload a dir to S3
|
Python
|
mit
|
vmalavolta/fabix
|
Add task to upload a dir to S3
|
import os
import boto
from boto.utils import compute_md5
from fabric.decorators import task
from fabric.utils import puts
def upload_file(bucket, key_name, file_path, policy='public-read'):
key = bucket.new_key(key_name)
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
key.set_metadata('fabix-md5', md5[0])
key.set_contents_from_filename(file_path, md5=md5, policy=policy)
return key
def get_key_name(local_path, fullpath):
key_name = fullpath[len(local_path):]
l = key_name.split(os.sep)
key_name = '/'.join(l)
return key_name.lstrip('/')
@task
def sync_dir_up(bucket_name, local_path):
puts("Sync directory {0} with bucket {1}".format(bucket_name, local_path))
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
for root, dirs, files in os.walk(local_path):
for fname in files:
file_path = os.path.join(root, fname)
key_name = get_key_name(local_path, file_path)
key = bucket.lookup(key_name)
if key:
key_md5 = key.get_metadata('fabix-md5')
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
if md5[0] == key_md5:
puts("Skipping {0} (MD5 match)".format(file_path))
continue
puts("Upload file {0}".format(file_path))
upload_file(bucket, key_name, file_path)
|
<commit_before><commit_msg>Add task to upload a dir to S3<commit_after>
|
import os
import boto
from boto.utils import compute_md5
from fabric.decorators import task
from fabric.utils import puts
def upload_file(bucket, key_name, file_path, policy='public-read'):
key = bucket.new_key(key_name)
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
key.set_metadata('fabix-md5', md5[0])
key.set_contents_from_filename(file_path, md5=md5, policy=policy)
return key
def get_key_name(local_path, fullpath):
key_name = fullpath[len(local_path):]
l = key_name.split(os.sep)
key_name = '/'.join(l)
return key_name.lstrip('/')
@task
def sync_dir_up(bucket_name, local_path):
puts("Sync directory {0} with bucket {1}".format(bucket_name, local_path))
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
for root, dirs, files in os.walk(local_path):
for fname in files:
file_path = os.path.join(root, fname)
key_name = get_key_name(local_path, file_path)
key = bucket.lookup(key_name)
if key:
key_md5 = key.get_metadata('fabix-md5')
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
if md5[0] == key_md5:
puts("Skipping {0} (MD5 match)".format(file_path))
continue
puts("Upload file {0}".format(file_path))
upload_file(bucket, key_name, file_path)
|
Add task to upload a dir to S3import os
import boto
from boto.utils import compute_md5
from fabric.decorators import task
from fabric.utils import puts
def upload_file(bucket, key_name, file_path, policy='public-read'):
key = bucket.new_key(key_name)
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
key.set_metadata('fabix-md5', md5[0])
key.set_contents_from_filename(file_path, md5=md5, policy=policy)
return key
def get_key_name(local_path, fullpath):
key_name = fullpath[len(local_path):]
l = key_name.split(os.sep)
key_name = '/'.join(l)
return key_name.lstrip('/')
@task
def sync_dir_up(bucket_name, local_path):
puts("Sync directory {0} with bucket {1}".format(bucket_name, local_path))
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
for root, dirs, files in os.walk(local_path):
for fname in files:
file_path = os.path.join(root, fname)
key_name = get_key_name(local_path, file_path)
key = bucket.lookup(key_name)
if key:
key_md5 = key.get_metadata('fabix-md5')
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
if md5[0] == key_md5:
puts("Skipping {0} (MD5 match)".format(file_path))
continue
puts("Upload file {0}".format(file_path))
upload_file(bucket, key_name, file_path)
|
<commit_before><commit_msg>Add task to upload a dir to S3<commit_after>import os
import boto
from boto.utils import compute_md5
from fabric.decorators import task
from fabric.utils import puts
def upload_file(bucket, key_name, file_path, policy='public-read'):
key = bucket.new_key(key_name)
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
key.set_metadata('fabix-md5', md5[0])
key.set_contents_from_filename(file_path, md5=md5, policy=policy)
return key
def get_key_name(local_path, fullpath):
key_name = fullpath[len(local_path):]
l = key_name.split(os.sep)
key_name = '/'.join(l)
return key_name.lstrip('/')
@task
def sync_dir_up(bucket_name, local_path):
puts("Sync directory {0} with bucket {1}".format(bucket_name, local_path))
conn = boto.connect_s3()
bucket = conn.get_bucket(bucket_name)
for root, dirs, files in os.walk(local_path):
for fname in files:
file_path = os.path.join(root, fname)
key_name = get_key_name(local_path, file_path)
key = bucket.lookup(key_name)
if key:
key_md5 = key.get_metadata('fabix-md5')
fd = open(file_path)
md5 = compute_md5(fd)
fd.close()
if md5[0] == key_md5:
puts("Skipping {0} (MD5 match)".format(file_path))
continue
puts("Upload file {0}".format(file_path))
upload_file(bucket, key_name, file_path)
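
A usage sketch for sync_dir_up (bucket name and directory are illustrative; this assumes the module is imported from a fabfile and that boto finds AWS credentials in its usual places, e.g. the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables):

    # fabfile.py (hypothetical)
    from fabix.aws.s3 import sync_dir_up

    # shell, using Fabric's task:arg1,arg2 syntax
    #   fab sync_dir_up:my-static-bucket,./public

Re-running the task skips any file whose stored fabix-md5 metadata still matches the local checksum.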
|
|
4a2be1f9526f66ee9c909bcfa1f987d696dafeac
|
oneflow/settings/snippets/common_development.py
|
oneflow/settings/snippets/common_development.py
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDRESS = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
Fix the devserver default address setting name.
|
Fix the devserver default address setting name.
|
Python
|
agpl-3.0
|
WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDRESS = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
Fix the devserver default address setting name.
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
<commit_before>#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDRESS = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
<commit_msg>Fix the devserver default address setting name.<commit_after>
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDRESS = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
Fix the devserver default address setting name.#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
<commit_before>#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDRESS = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
<commit_msg>Fix the devserver default address setting name.<commit_after>#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', 'devserver', )
DEVSERVER_DEFAULT_ADDR = '0.0.0.0'
DEVSERVER_DEFAULT_PORT = 8000
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
6c885eb92d7e150e39297d32c45fc8d69aae099f
|
tools/update-qibuild-cmake.py
|
tools/update-qibuild-cmake.py
|
## Copyright (C) 2011 Aldebaran Robotics
# Update all the qibuild.cmake files in a git repo
# (this will be done automatically by `qibuild configure`,
# but this tool can still be handy)
import os
import argparse
import subprocess
import shutil
def update_qibuild_cmake(template_path, git_repo):
""" Update qibuild.cmake files in a git repo.
- Check that repo is clean
- Find all the qibuild.cmake files in the git repo,
replace them by the template
- Prepare a commit with a nice message
"""
try:
out = subprocess.check_output(["git", "status"], cwd=git_repo)
except subprocess.CalledProcessError, e:
mess = "Could not run git status in %s\n" % git_repo
mess += "Error was:\n"
mess += str(e)
raise Exception(mess)
if not "clean" in out:
mess = "git repo not clean\n"
mess += "Git status said:\n"
mess += out
raise Exception(mess)
out = subprocess.check_output(["git", "ls-files"], cwd=git_repo)
filenames = out.splitlines()
for filename in filenames:
basename = os.path.basename(filename)
full_path = os.path.join(git_repo, filename)
if basename == "qibuild.cmake":
print "patching", filename
shutil.copy(template_path, full_path)
subprocess.check_call(["git", "commit", "-a", "-m", "update qibuild.cmake"],
cwd=git_repo)
def main():
""" Parse command line arguments """
parser = argparse.ArgumentParser()
parser.add_argument("template_path")
parser.add_argument("git_repo")
args = parser.parse_args()
update_qibuild_cmake(args.template_path, args.git_repo)
if __name__ == "__main__":
main()
|
Add a small tool to update qibuild.cmake inside git repos
|
Add a small tool to update qibuild.cmake inside git repos
|
Python
|
bsd-3-clause
|
dmerejkowsky/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,aldebaran/qibuild
|
Add a small tool to update qibuild.cmake inside git repos
|
## Copyright (C) 2011 Aldebaran Robotics
# Update all the qibuild.cmake files in a git repo
# (this will be done automatically by `qibuild configure`,
# but this tool can still be handy)
import os
import argparse
import subprocess
import shutil
def update_qibuild_cmake(template_path, git_repo):
""" Update qibuild.cmake files in a git repo.
- Check that repo is clean
- Find all the qibuild.cmake files in the git repo,
replace them by the template
- Prepare a commit with a nice message
"""
try:
out = subprocess.check_output(["git", "status"], cwd=git_repo)
except subprocess.CalledProcessError, e:
mess = "Could not run git status in %s\n" % git_repo
mess += "Error was:\n"
mess += str(e)
raise Exception(mess)
if not "clean" in out:
mess = "git repo not clean\n"
mess += "Git status said:\n"
mess += out
raise Exception(mess)
out = subprocess.check_output(["git", "ls-files"], cwd=git_repo)
filenames = out.splitlines()
for filename in filenames:
basename = os.path.basename(filename)
full_path = os.path.join(git_repo, filename)
if basename == "qibuild.cmake":
print "patching", filename
shutil.copy(template_path, full_path)
subprocess.check_call(["git", "commit", "-a", "-m", "update qibuild.cmake"],
cwd=git_repo)
def main():
""" Parse command line arguments """
parser = argparse.ArgumentParser()
parser.add_argument("template_path")
parser.add_argument("git_repo")
args = parser.parse_args()
update_qibuild_cmake(args.template_path, args.git_repo)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a small tool to update qibuild.cmake inside git repos<commit_after>
|
## Copyright (C) 2011 Aldebaran Robotics
# Update all the qibuild.cmake files in a git repo
# (this will be done automatically by `qibuild configure`,
# but this tool can still be handy)
import os
import argparse
import subprocess
import shutil
def update_qibuild_cmake(template_path, git_repo):
""" Update qibuild.cmake files in a git repo.
- Check that repo is clean
- Find all the qibuild.cmake files in the git repo,
replace them by the template
- Prepare a commit with a nice message
"""
try:
out = subprocess.check_output(["git", "status"], cwd=git_repo)
except subprocess.CalledProcessError, e:
mess = "Could not run git status in %s\n" % git_repo
mess += "Error was:\n"
mess += str(e)
raise Exception(mess)
if not "clean" in out:
mess = "git repo not clean\n"
mess += "Git status said:\n"
mess += out
raise Exception(mess)
out = subprocess.check_output(["git", "ls-files"], cwd=git_repo)
filenames = out.splitlines()
for filename in filenames:
basename = os.path.basename(filename)
full_path = os.path.join(git_repo, filename)
if basename == "qibuild.cmake":
print "patching", filename
shutil.copy(template_path, full_path)
subprocess.check_call(["git", "commit", "-a", "-m", "update qibuild.cmake"],
cwd=git_repo)
def main():
""" Parse command line arguments """
parser = argparse.ArgumentParser()
parser.add_argument("template_path")
parser.add_argument("git_repo")
args = parser.parse_args()
update_qibuild_cmake(args.template_path, args.git_repo)
if __name__ == "__main__":
main()
|
Add a small tool to update qibuild.cmake inside git repos## Copyright (C) 2011 Aldebaran Robotics
# Update all the qibuild.cmake files in a git repo
# (this will be done automatically by `qibuild configure`,
# but this tool can still be handy)
import os
import argparse
import subprocess
import shutil
def update_qibuild_cmake(template_path, git_repo):
""" Update qibuild.cmake files in a git repo.
- Check that repo is clean
- Find all the qibuild.cmake files in the git repo,
replace them by the template
- Prepare a commit with a nice message
"""
try:
out = subprocess.check_output(["git", "status"], cwd=git_repo)
except subprocess.CalledProcessError, e:
mess = "Could not run git status in %s\n" % git_repo
mess += "Error was:\n"
mess += str(e)
raise Exception(mess)
if not "clean" in out:
mess = "git repo not clean\n"
mess += "Git status said:\n"
mess += out
raise Exception(mess)
out = subprocess.check_output(["git", "ls-files"], cwd=git_repo)
filenames = out.splitlines()
for filename in filenames:
basename = os.path.basename(filename)
full_path = os.path.join(git_repo, filename)
if basename == "qibuild.cmake":
print "patching", filename
shutil.copy(template_path, full_path)
subprocess.check_call(["git", "commit", "-a", "-m", "update qibuild.cmake"],
cwd=git_repo)
def main():
""" Parse command line arguments """
parser = argparse.ArgumentParser()
parser.add_argument("template_path")
parser.add_argument("git_repo")
args = parser.parse_args()
update_qibuild_cmake(args.template_path, args.git_repo)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a small tool to update qibuild.cmake inside git repos<commit_after>## Copyright (C) 2011 Aldebaran Robotics
# Update all the qibuild.cmake files in a git repo
# (this will be done automatically by `qibuild configure`,
# but this tool can still be handy)
import os
import argparse
import subprocess
import shutil
def update_qibuild_cmake(template_path, git_repo):
""" Update qibuild.cmake files in a git repo.
- Check that repo is clean
- Find all the qibuild.cmake files in the git repo,
replace them by the template
- Prepare a commit with a nice message
"""
try:
out = subprocess.check_output(["git", "status"], cwd=git_repo)
except subprocess.CalledProcessError, e:
mess = "Could not run git status in %s\n" % git_repo
mess += "Error was:\n"
mess += str(e)
raise Exception(mess)
if not "clean" in out:
mess = "git repo not clean\n"
mess += "Git status said:\n"
mess += out
raise Exception(mess)
out = subprocess.check_output(["git", "ls-files"], cwd=git_repo)
filenames = out.splitlines()
for filename in filenames:
basename = os.path.basename(filename)
full_path = os.path.join(git_repo, filename)
if basename == "qibuild.cmake":
print "patching", filename
shutil.copy(template_path, full_path)
subprocess.check_call(["git", "commit", "-a", "-m", "update qibuild.cmake"],
cwd=git_repo)
def main():
""" Parse command line arguments """
parser = argparse.ArgumentParser()
parser.add_argument("template_path")
parser.add_argument("git_repo")
args = parser.parse_args()
update_qibuild_cmake(args.template_path, args.git_repo)
if __name__ == "__main__":
main()
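
A usage sketch (paths are illustrative). The script is Python 2 only — note the `except subprocess.CalledProcessError, e` syntax and the bare print statements — and subprocess.check_output requires at least Python 2.7:

    python tools/update-qibuild-cmake.py /path/to/canonical/qibuild.cmake /path/to/git/checkout

It refuses to run if `git status` does not report a clean tree, and it leaves a single commit ("update qibuild.cmake") covering every patched file.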
|
|
3bf027eaf2c62ec6fcb3192cfddc5a2aa8b73895
|
oneflow/settings/chani.py
|
oneflow/settings/chani.py
|
# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost'
|
# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost:8000'
EMAIL_HOST = 'gurney'
#EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#EMAIL_FILE_PATH = '/tmp/1flow.mail'
|
Make mail work on my dev machine.
|
Make mail work on my dev machine.
|
Python
|
agpl-3.0
|
1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow
|
# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost'
Make mail work on my dev machine.
|
# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost:8000'
EMAIL_HOST = 'gurney'
#EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#EMAIL_FILE_PATH = '/tmp/1flow.mail'
|
<commit_before># -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost'
<commit_msg>Make mail work on my dev machine.<commit_after>
|
# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost:8000'
EMAIL_HOST = 'gurney'
#EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#EMAIL_FILE_PATH = '/tmp/1flow.mail'
|
# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost'
Make mail work on my dev machine.# -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost:8000'
EMAIL_HOST = 'gurney'
#EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#EMAIL_FILE_PATH = '/tmp/1flow.mail'
|
<commit_before># -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost'
<commit_msg>Make mail work on my dev machine.<commit_after># -*- coding: utf-8 -*-
# Settings for 1flow.net (local development)
import os
from sparks.django.settings import include_snippets
include_snippets(
os.path.dirname(__file__), (
'00_development',
# Activate this to test 404/500…
#'00_production',
'1flow_io',
'common',
'db_common',
'db_development',
'cache_common',
'cache_development',
'mail_development',
'raven_development',
'common_development',
'rosetta',
'djdt',
),
globals()
)
# Override `1flow_net` for local development
SITE_DOMAIN = 'localhost:8000'
EMAIL_HOST = 'gurney'
#EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#EMAIL_FILE_PATH = '/tmp/1flow.mail'
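
The commented-out lines above keep the file-based backend as a fallback when the SMTP host ('gurney' here) is unreachable. Another common development option, not part of this commit, is Django's console backend, which writes outgoing mail to stdout instead of sending it:

    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'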
|
76c2be209e04e38b4730a07a5155e3e22501027a
|
som_generationkwh/migrations/0.0.1.1/pre-0001_delete_inactive_investments.py
|
som_generationkwh/migrations/0.0.1.1/pre-0001_delete_inactive_investments.py
|
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.1: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
    logger.notifyChannel('migration', netsvc.LOG_INFO, 'Successfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.1: Hem entrat al down"
pass
# vim: ts=4 sw=4 et
|
Change version terp and move migration folder
|
Change version terp and move migration folder
|
Python
|
agpl-3.0
|
Som-Energia/somenergia-generationkwh,Som-Energia/somenergia-generationkwh
|
Change version terp and move migration folder
|
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.1: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
    logger.notifyChannel('migration', netsvc.LOG_INFO, 'Successfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.1: Hem entrat al down"
pass
# vim: ts=4 sw=4 et
|
<commit_before><commit_msg>Change version terp and move migration folder<commit_after>
|
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.1: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
    logger.notifyChannel('migration', netsvc.LOG_INFO, 'Successfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.1: Hem entrat al down"
pass
# vim: ts=4 sw=4 et
|
Change version terp and move migration folder# coding=utf-8
from oopgrade import oopgrade
import netsvc
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.1: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
    logger.notifyChannel('migration', netsvc.LOG_INFO, 'Successfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.1: Hem entrat al down"
pass
# vim: ts=4 sw=4 et
|
<commit_before><commit_msg>Change version terp and move migration folder<commit_after># coding=utf-8
from oopgrade import oopgrade
import netsvc
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.1: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
    logger.notifyChannel('migration', netsvc.LOG_INFO, 'Successfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.1: Hem entrat al down"
pass
# vim: ts=4 sw=4 et
|
|
977fb316f3a27f6516fac2a6323545381fdd7cc0
|
sparts/tasks/periodic.py
|
sparts/tasks/periodic.py
|
from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
def execute(self, context=None):
self.logger.debug('execute')
|
from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
t0 = time.time()
while not self.service._stop:
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
t0 = time.time()
def execute(self, context=None):
self.logger.debug('execute')
|
Fix time calculation for TryLater in PeriodicTasks
|
Fix time calculation for TryLater in PeriodicTasks
|
Python
|
bsd-3-clause
|
djipko/sparts,pshuff/sparts,fmoo/sparts,bboozzoo/sparts,djipko/sparts,pshuff/sparts,bboozzoo/sparts,facebook/sparts,facebook/sparts,fmoo/sparts
|
from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
def execute(self, context=None):
self.logger.debug('execute')
Fix time calculation for TryLater in PeriodicTasks
|
from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
t0 = time.time()
while not self.service._stop:
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
t0 = time.time()
def execute(self, context=None):
self.logger.debug('execute')
|
<commit_before>from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
def execute(self, context=None):
self.logger.debug('execute')
<commit_msg>Fix time calculation for TryLater in PeriodicTasks<commit_after>
|
from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
t0 = time.time()
while not self.service._stop:
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
t0 = time.time()
def execute(self, context=None):
self.logger.debug('execute')
|
from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
def execute(self, context=None):
self.logger.debug('execute')
Fix time calculation for TryLater in PeriodicTasksfrom ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
t0 = time.time()
while not self.service._stop:
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
t0 = time.time()
def execute(self, context=None):
self.logger.debug('execute')
|
<commit_before>from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
while not self.service._stop:
t0 = time.time()
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
def execute(self, context=None):
self.logger.debug('execute')
<commit_msg>Fix time calculation for TryLater in PeriodicTasks<commit_after>from ..vtask import VTask, TryLater
import time
from ..sparts import option, counter, samples, SampleType
from threading import Event
class PeriodicTask(VTask):
INTERVAL = None
execute_duration = samples(windows=[60, 240],
types=[SampleType.AVG, SampleType.MAX, SampleType.MIN])
n_iterations = counter()
n_slow_iterations = counter()
n_try_later = counter()
interval = option(type=float, metavar='SECONDS',
default=lambda cls: cls.INTERVAL,
help='How often this task should run [%(default)s] (s)')
def initTask(self):
super(PeriodicTask, self).initTask()
assert self.getTaskOption('interval') is not None
self.stop_event = Event()
def stop(self):
self.stop_event.set()
super(PeriodicTask, self).stop()
def _runloop(self):
t0 = time.time()
while not self.service._stop:
try:
self.execute()
except TryLater:
self.n_try_later.increment()
continue
self.n_iterations.increment()
self.execute_duration.add(time.time() - t0)
to_sleep = (t0 + self.interval) - time.time()
if to_sleep > 0:
if self.stop_event.wait(to_sleep):
return
else:
self.n_slow_iterations.increment()
t0 = time.time()
def execute(self, context=None):
self.logger.debug('execute')
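
A small worked example of what moving t0 changes, assuming interval = 10 seconds: if execute() raises TryLater at t = 1 and the retry then succeeds at t = 3, the old loop reset t0 to 1 at the top of the retry, so execute_duration recorded only the 2-second retry and the sleep was (1 + 10) - 3 = 8, pushing the next run to t = 11. With t0 set before the loop and refreshed only after a successful pass, the duration sample covers the failed attempt plus the retry (3 seconds) and the sleep is (0 + 10) - 3 = 7, keeping the next run on the original 10-second cadence.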
|
5cabd890b65102e3a28fa864af45bec19ee0e601
|
index_converter.py
|
index_converter.py
|
# -*- coding: utf-8 -*-
"""Converting the index from the Archives in a suitable format."""
__authors__ = 'User:Jean-Frédéric'
import codecs
from uploadlibrary import UnicodeCSV
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
def read_csv(csv_file, delimiter):
"""Read the CSV file and return each line."""
file_handler = codecs.open(csv_file, 'r', 'utf-8')
return UnicodeCSV.unicode_csv_dictreader(file_handler,
delimiter=delimiter)
def main():
"""Main method."""
csvreader = read_csv('index.csv', ';')
mapper = {}
for row in csvreader:
print row
if __name__ == "__main__":
main()
|
Add script to convert the archives index
|
Add script to convert the archives index
The Archives provided us with an index mapping a place
to a range of identifiers. This needs some work to be useful.
|
Python
|
mit
|
JeanFred/TrutatBis
|
Add script to convert the archives index
The Archives provided us with an index mapping a place
to a range of identifiers. This needs some work to be useful.
|
# -*- coding: utf-8 -*-
"""Converting the index from the Archives in a suitable format."""
__authors__ = 'User:Jean-Frédéric'
import codecs
from uploadlibrary import UnicodeCSV
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
def read_csv(csv_file, delimiter):
"""Read the CSV file and return each line."""
file_handler = codecs.open(csv_file, 'r', 'utf-8')
return UnicodeCSV.unicode_csv_dictreader(file_handler,
delimiter=delimiter)
def main():
"""Main method."""
csvreader = read_csv('index.csv', ';')
mapper = {}
for row in csvreader:
print row
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to convert the archives index
The Archives provided us with an index mapping a place
to a range of identifiers. This needs some work to be useful.<commit_after>
|
# -*- coding: utf-8 -*-
"""Converting the index from the Archives in a suitable format."""
__authors__ = 'User:Jean-Frédéric'
import codecs
from uploadlibrary import UnicodeCSV
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
def read_csv(csv_file, delimiter):
"""Read the CSV file and return each line."""
file_handler = codecs.open(csv_file, 'r', 'utf-8')
return UnicodeCSV.unicode_csv_dictreader(file_handler,
delimiter=delimiter)
def main():
"""Main method."""
csvreader = read_csv('index.csv', ';')
mapper = {}
for row in csvreader:
print row
if __name__ == "__main__":
main()
|
Add script to convert the archives index
The Archives provided us with an index mapping a place
to a range of identifiers. This needs some work to be useful.# -*- coding: utf-8 -*-
"""Converting the index from the Archives in a suitable format."""
__authors__ = 'User:Jean-Frédéric'
import codecs
from uploadlibrary import UnicodeCSV
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
def read_csv(csv_file, delimiter):
"""Read the CSV file and return each line."""
file_handler = codecs.open(csv_file, 'r', 'utf-8')
return UnicodeCSV.unicode_csv_dictreader(file_handler,
delimiter=delimiter)
def main():
"""Main method."""
csvreader = read_csv('index.csv', ';')
mapper = {}
for row in csvreader:
print row
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to convert the archives index
The Archives provided us with an index mapping a place
to a range of identifier. This needs some work to be useful.<commit_after># -*- coding: utf-8 -*-
"""Converting the index from the Archives in a suitable format."""
__authors__ = 'User:Jean-Frédéric'
import codecs
from uploadlibrary import UnicodeCSV
import sys
import re
reload(sys)
sys.setdefaultencoding('utf-8')
def read_csv(csv_file, delimiter):
"""Read the CSV file and return each line."""
file_handler = codecs.open(csv_file, 'r', 'utf-8')
return UnicodeCSV.unicode_csv_dictreader(file_handler,
delimiter=delimiter)
def main():
"""Main method."""
csvreader = read_csv('index.csv', ';')
mapper = {}
for row in csvreader:
print row
if __name__ == "__main__":
main()
|
|
bb3502e96cd4c3636a75dbf5c3156c2b54e54577
|
timeside/server/management/commands/timeside-celery-worker.py
|
timeside/server/management/commands/timeside-celery-worker.py
|
import shlex
import subprocess
from django.core.management.base import BaseCommand
from django.utils import autoreload
# thanks to https://medium.com/aubergine-solutions/auto-reload-development-mode-for-celery-worker-using-docker-compose-and-django-management-2ba8e313eb37
def restart_celery(*args, **kwargs):
kill_worker_cmd = 'pkill -9 celery'
subprocess.call(shlex.split(kill_worker_cmd))
start_worker_cmd = 'celery worker -A worker'
subprocess.call(shlex.split(start_worker_cmd))
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Starting celery worker with autoreload...')
autoreload.main(restart_celery, args=None, kwargs=None)
|
Add worker celery reload command prototype
|
Add worker celery reload command prototype
|
Python
|
agpl-3.0
|
Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide,Parisson/TimeSide
|
Add worker celery reload command prototype
|
import shlex
import subprocess
from django.core.management.base import BaseCommand
from django.utils import autoreload
# thanks to https://medium.com/aubergine-solutions/auto-reload-development-mode-for-celery-worker-using-docker-compose-and-django-management-2ba8e313eb37
def restart_celery(*args, **kwargs):
kill_worker_cmd = 'pkill -9 celery'
subprocess.call(shlex.split(kill_worker_cmd))
start_worker_cmd = 'celery worker -A worker'
subprocess.call(shlex.split(start_worker_cmd))
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Starting celery worker with autoreload...')
autoreload.main(restart_celery, args=None, kwargs=None)
|
<commit_before><commit_msg>Add worker celery reload command prototype<commit_after>
|
import shlex
import subprocess
from django.core.management.base import BaseCommand
from django.utils import autoreload
# thanks to https://medium.com/aubergine-solutions/auto-reload-development-mode-for-celery-worker-using-docker-compose-and-django-management-2ba8e313eb37
def restart_celery(*args, **kwargs):
kill_worker_cmd = 'pkill -9 celery'
subprocess.call(shlex.split(kill_worker_cmd))
start_worker_cmd = 'celery worker -A worker'
subprocess.call(shlex.split(start_worker_cmd))
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Starting celery worker with autoreload...')
autoreload.main(restart_celery, args=None, kwargs=None)
|
Add worker celery reload command prototypeimport shlex
import subprocess
from django.core.management.base import BaseCommand
from django.utils import autoreload
# thanks to https://medium.com/aubergine-solutions/auto-reload-development-mode-for-celery-worker-using-docker-compose-and-django-management-2ba8e313eb37
def restart_celery(*args, **kwargs):
kill_worker_cmd = 'pkill -9 celery'
subprocess.call(shlex.split(kill_worker_cmd))
start_worker_cmd = 'celery worker -A worker'
subprocess.call(shlex.split(start_worker_cmd))
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Starting celery worker with autoreload...')
autoreload.main(restart_celery, args=None, kwargs=None)
|
<commit_before><commit_msg>Add worker celery reload command prototype<commit_after>import shlex
import subprocess
from django.core.management.base import BaseCommand
from django.utils import autoreload
# thanks to https://medium.com/aubergine-solutions/auto-reload-development-mode-for-celery-worker-using-docker-compose-and-django-management-2ba8e313eb37
def restart_celery(*args, **kwargs):
kill_worker_cmd = 'pkill -9 celery'
subprocess.call(shlex.split(kill_worker_cmd))
start_worker_cmd = 'celery worker -A worker'
subprocess.call(shlex.split(start_worker_cmd))
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Starting celery worker with autoreload...')
autoreload.main(restart_celery, args=None, kwargs=None)
|
|
953bc05e048cb7e00db540254103ec86664ad1d6
|
analysis/11-compress-jacobians.py
|
analysis/11-compress-jacobians.py
|
import climate
import logging
import numpy as np
import os
import pickle
from sklearn.decomposition import PCA
def compress(source, k='mle', key='jac'):
filenames = sorted(fn for fn in os.listdir(source)
if key in fn and fn.endswith('.npy'))
logging.info('%s: found %d jacobians matching %s',
source, len(filenames), key)
arrays = [np.load(os.path.join(source, fn)) for fn in filenames]
for arr, fn in zip(arrays, filenames):
if np.isnan(arr).any():
logging.info('%s: %s contains %d nans!', fn, arr.shape, np.isnan(arr).sum())
pca = PCA(n_components=k if k == 'mle' else int(k))
pca.fit(np.vstack(arrays))
for arr, fn in zip(arrays, filenames):
out = os.path.join(source, fn.replace(key, '{}_{}'.format(key, k)))
karr = pca.transform(arr)
logging.info('%s: saving %s', out, karr.shape)
np.save(out, karr)
out = os.path.join(source, 'pca_{}_{}.pkl'.format(key, k))
    pickle.dump(pca, open(out, 'wb'))
def main(root, k='mle'):
for subject in sorted(os.listdir(root)):
compress(os.path.join(root, subject), k, 'jac')
if __name__ == '__main__':
climate.call(main)
|
Add script for PCA compressing jacobian arrays.
|
Add script for PCA compressing jacobian arrays.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add script for PCA compressing jacobian arrays.
|
import climate
import logging
import numpy as np
import os
import pickle
from sklearn.decomposition import PCA
def compress(source, k='mle', key='jac'):
filenames = sorted(fn for fn in os.listdir(source)
if key in fn and fn.endswith('.npy'))
logging.info('%s: found %d jacobians matching %s',
source, len(filenames), key)
arrays = [np.load(os.path.join(source, fn)) for fn in filenames]
for arr, fn in zip(arrays, filenames):
if np.isnan(arr).any():
logging.info('%s: %s contains %d nans!', fn, arr.shape, np.isnan(arr).sum())
pca = PCA(n_components=k if k == 'mle' else int(k))
pca.fit(np.vstack(arrays))
for arr, fn in zip(arrays, filenames):
out = os.path.join(source, fn.replace(key, '{}_{}'.format(key, k)))
karr = pca.transform(arr)
logging.info('%s: saving %s', out, karr.shape)
np.save(out, karr)
out = os.path.join(source, 'pca_{}_{}.pkl'.format(key, k))
    pickle.dump(pca, open(out, 'wb'))
def main(root, k='mle'):
for subject in sorted(os.listdir(root)):
compress(os.path.join(root, subject), k, 'jac')
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for PCA compressing jacobian arrays.<commit_after>
|
import climate
import logging
import numpy as np
import os
import pickle
from sklearn.decomposition import PCA
def compress(source, k='mle', key='jac'):
filenames = sorted(fn for fn in os.listdir(source)
if key in fn and fn.endswith('.npy'))
logging.info('%s: found %d jacobians matching %s',
source, len(filenames), key)
arrays = [np.load(os.path.join(source, fn)) for fn in filenames]
for arr, fn in zip(arrays, filenames):
if np.isnan(arr).any():
logging.info('%s: %s contains %d nans!', fn, arr.shape, np.isnan(arr).sum())
pca = PCA(n_components=k if k == 'mle' else int(k))
pca.fit(np.vstack(arrays))
for arr, fn in zip(arrays, filenames):
out = os.path.join(source, fn.replace(key, '{}_{}'.format(key, k)))
karr = pca.transform(arr)
logging.info('%s: saving %s', out, karr.shape)
np.save(out, karr)
out = os.path.join(source, 'pca_{}_{}.pkl'.format(key, k))
    pickle.dump(pca, open(out, 'wb'))
def main(root, k='mle'):
for subject in sorted(os.listdir(root)):
compress(os.path.join(root, subject), k, 'jac')
if __name__ == '__main__':
climate.call(main)
|
Add script for PCA compressing jacobian arrays.import climate
import logging
import numpy as np
import os
import pickle
from sklearn.decomposition import PCA
def compress(source, k='mle', key='jac'):
filenames = sorted(fn for fn in os.listdir(source)
if key in fn and fn.endswith('.npy'))
logging.info('%s: found %d jacobians matching %s',
source, len(filenames), key)
arrays = [np.load(os.path.join(source, fn)) for fn in filenames]
for arr, fn in zip(arrays, filenames):
if np.isnan(arr).any():
logging.info('%s: %s contains %d nans!', fn, arr.shape, np.isnan(arr).sum())
pca = PCA(n_components=k if k == 'mle' else int(k))
pca.fit(np.vstack(arrays))
for arr, fn in zip(arrays, filenames):
out = os.path.join(source, fn.replace(key, '{}_{}'.format(key, k)))
karr = pca.transform(arr)
logging.info('%s: saving %s', out, karr.shape)
np.save(out, karr)
out = os.path.join(source, 'pca_{}_{}.pkl'.format(key, k))
    pickle.dump(pca, open(out, 'wb'))
def main(root, k='mle'):
for subject in sorted(os.listdir(root)):
compress(os.path.join(root, subject), k, 'jac')
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for PCA compressing jacobian arrays.<commit_after>import climate
import logging
import numpy as np
import os
import pickle
from sklearn.decomposition import PCA
def compress(source, k='mle', key='jac'):
filenames = sorted(fn for fn in os.listdir(source)
if key in fn and fn.endswith('.npy'))
logging.info('%s: found %d jacobians matching %s',
source, len(filenames), key)
arrays = [np.load(os.path.join(source, fn)) for fn in filenames]
for arr, fn in zip(arrays, filenames):
if np.isnan(arr).any():
logging.info('%s: %s contains %d nans!', fn, arr.shape, np.isnan(arr).sum())
pca = PCA(n_components=k if k == 'mle' else int(k))
pca.fit(np.vstack(arrays))
for arr, fn in zip(arrays, filenames):
out = os.path.join(source, fn.replace(key, '{}_{}'.format(key, k)))
karr = pca.transform(arr)
logging.info('%s: saving %s', out, karr.shape)
np.save(out, karr)
out = os.path.join(source, 'pca_{}_{}.pkl'.format(key, k))
pickle.dump(open(out, 'wb'), pca)
def main(root, k='mle'):
for subject in sorted(os.listdir(root)):
compress(os.path.join(root, subject), k, 'jac')
if __name__ == '__main__':
climate.call(main)
|
|
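A short usage sketch for the compression script above. It assumes the script is importable as a module named compress_jacobians and that 'data/subject01' holds *jac*.npy files; both names are made up for illustration.

from compress_jacobians import compress

# Let scikit-learn's PCA pick the dimensionality, as the script does by default.
compress('data/subject01', k='mle', key='jac')
# Or keep exactly 10 principal components per jacobian array.
compress('data/subject01', k=10, key='jac')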
7b2c082f117a5481823c806af926b812045933e7
|
recipy/PatchMultipleWrappers.py
|
recipy/PatchMultipleWrappers.py
|
import six
from .PatchImporter import PatchImporter
from recipyCommon.utils import patch_function, create_wrapper
from recipyCommon.config import option_set
class PatchMultipleWrappers(PatchImporter):
"""Sublass of PatchImporter that allows patching input and output functions
using more than two wrappers.
This class should not be used directly, but subclasses which set the
following class attributes should be created:
* modulename (str)
* wrappers (WrapperList)
"""
def patch(self, mod):
"""Do the patching of `input_functions` and `output_functions`
in `mod` using `input_wrapper` and `output_wrapper` respectively.
"""
print(self.__class__)
for f in self.wrappers.functions:
if not self._ignore(f):
if option_set('general', 'debug'):
msg = 'Patching {} function: {}'.format(f['type'],
f['function'])
print(msg)
# The function that is returned by create_wrapper assumes that
# the wrapper is created directly on the patch object (the
# first argument of f is self). We have to fake that here.
# Otherwise, there will be an error, because an argument is
# missing:
# TypeError f() takes exactly 5 arguments (4 given)
setattr(self.__class__, 'wrapper', f['wrapper'])
patch_function(mod, f['function'], self.wrapper)
else:
if option_set('general', 'debug'):
print('Ignoring {} for: {}'.format(f['type'],
self.modulename))
return mod
def _ignore(self, f):
root_modulename = self.modulename.split('.')[0]
opt = 'ignored {}s'.format(f['type'])
return option_set(opt, root_modulename) or option_set(opt, 'all')
class WrapperList(object):
"""A store for functions and their wrappers.
"""
def __init__(self):
self.functions = []
def add(self, function_names, log_function, arg_loc, modname,
function_type):
if isinstance(function_names, six.string_types):
function_names = [function_names]
wrapper = create_wrapper(log_function, arg_loc, modname)
for f in function_names:
self.functions.append({'function': f,
'wrapper': wrapper,
'type': function_type})
def add_inputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'input')
def add_outputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'output')
|
Add new patch type that allows for multiple wrappers
|
Add new patch type that allows for multiple wrappers
PatchSimple allows for exactly one wrapper for input and output functions
respectively. For xarray output function save_mfdataset, the default wrapper
which logs argument 0 does not work, because it should be argument 1
that is logged. So, a second wrapper is required to be able to log this
function. Therefore a new type of patch was added.
|
Python
|
apache-2.0
|
recipy/recipy,recipy/recipy
|
Add new patch type that allows for multiple wrappers
PatchSimple allows for exactly one wrapper for input and output functions
respectively. For xarray output function save_mfdataset, the default wrapper
which logs argument 0 does not work, because it should be argument 1
that is logged. So, a second wrapper is required to be able to log this
function. Therefore a new type of patch was added.
|
import six
from .PatchImporter import PatchImporter
from recipyCommon.utils import patch_function, create_wrapper
from recipyCommon.config import option_set
class PatchMultipleWrappers(PatchImporter):
"""Sublass of PatchImporter that allows patching input and output functions
using more than two wrappers.
This class should not be used directly, but subclasses which set the
following class attributes should be created:
* modulename (str)
* wrappers (WrapperList)
"""
def patch(self, mod):
"""Do the patching of `input_functions` and `output_functions`
in `mod` using `input_wrapper` and `output_wrapper` respectively.
"""
print(self.__class__)
for f in self.wrappers.functions:
if not self._ignore(f):
if option_set('general', 'debug'):
msg = 'Patching {} function: {}'.format(f['type'],
f['function'])
print(msg)
# The function that is returned by create_wrapper assumes that
# the wrapper is created directly on the patch object (the
# first argument of f is self). We have to fake that here.
# Otherwise, there will be an error, because an argument is
# missing:
# TypeError f() takes exactly 5 arguments (4 given)
setattr(self.__class__, 'wrapper', f['wrapper'])
patch_function(mod, f['function'], self.wrapper)
else:
if option_set('general', 'debug'):
print('Ignoring {} for: {}'.format(f['type'],
self.modulename))
return mod
def _ignore(self, f):
root_modulename = self.modulename.split('.')[0]
opt = 'ignored {}s'.format(f['type'])
return option_set(opt, root_modulename) or option_set(opt, 'all')
class WrapperList(object):
"""A store for functions and their wrappers.
"""
def __init__(self):
self.functions = []
def add(self, function_names, log_function, arg_loc, modname,
function_type):
if isinstance(function_names, six.string_types):
function_names = [function_names]
wrapper = create_wrapper(log_function, arg_loc, modname)
for f in function_names:
self.functions.append({'function': f,
'wrapper': wrapper,
'type': function_type})
def add_inputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'input')
def add_outputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'output')
|
<commit_before><commit_msg>Add new patch type that allows for multiple wrappers
PatchSimple allows for exactly one wrapper for input and output functions
respectively. For xarray output function save_mfdataset, the default wrapper
which logs argument 0 does not work, because it should be argument 1
that is logged. So, a second wrapper is required to be able to log this
function. Therefore a new type of patch was added.<commit_after>
|
import six
from .PatchImporter import PatchImporter
from recipyCommon.utils import patch_function, create_wrapper
from recipyCommon.config import option_set
class PatchMultipleWrappers(PatchImporter):
"""Sublass of PatchImporter that allows patching input and output functions
using more than two wrappers.
This class should not be used directly, but subclasses which set the
following class attributes should be created:
* modulename (str)
* wrappers (WrapperList)
"""
def patch(self, mod):
"""Do the patching of `input_functions` and `output_functions`
in `mod` using `input_wrapper` and `output_wrapper` respectively.
"""
print(self.__class__)
for f in self.wrappers.functions:
if not self._ignore(f):
if option_set('general', 'debug'):
msg = 'Patching {} function: {}'.format(f['type'],
f['function'])
print(msg)
# The function that is returned by create_wrapper assumes that
# the wrapper is created directly on the patch object (the
# first argument of f is self). We have to fake that here.
# Otherwise, there will be an error, because an argument is
# missing:
# TypeError f() takes exactly 5 arguments (4 given)
setattr(self.__class__, 'wrapper', f['wrapper'])
patch_function(mod, f['function'], self.wrapper)
else:
if option_set('general', 'debug'):
print('Ignoring {} for: {}'.format(f['type'],
self.modulename))
return mod
def _ignore(self, f):
root_modulename = self.modulename.split('.')[0]
opt = 'ignored {}s'.format(f['type'])
return option_set(opt, root_modulename) or option_set(opt, 'all')
class WrapperList(object):
"""A store for functions and their wrappers.
"""
def __init__(self):
self.functions = []
def add(self, function_names, log_function, arg_loc, modname,
function_type):
if isinstance(function_names, six.string_types):
function_names = [function_names]
wrapper = create_wrapper(log_function, arg_loc, modname)
for f in function_names:
self.functions.append({'function': f,
'wrapper': wrapper,
'type': function_type})
def add_inputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'input')
def add_outputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'output')
|
Add new patch type that allows for multiple wrappers
PatchSimple allows for exactly one wrapper for input and output functions
respectively. For xarray output function save_mfdataset, the default wrapper
which logs argument 0 does not work, because it should be argument 1
that is logged. So, a second wrapper is required to be able to log this
function. Therefore a new type of patch was added.import six
from .PatchImporter import PatchImporter
from recipyCommon.utils import patch_function, create_wrapper
from recipyCommon.config import option_set
class PatchMultipleWrappers(PatchImporter):
"""Sublass of PatchImporter that allows patching input and output functions
using more than two wrappers.
This class should not be used directly, but subclasses which set the
following class attributes should be created:
* modulename (str)
* wrappers (WrapperList)
"""
def patch(self, mod):
"""Do the patching of `input_functions` and `output_functions`
in `mod` using `input_wrapper` and `output_wrapper` respectively.
"""
print(self.__class__)
for f in self.wrappers.functions:
if not self._ignore(f):
if option_set('general', 'debug'):
msg = 'Patching {} function: {}'.format(f['type'],
f['function'])
print(msg)
# The function that is returned by create_wrapper assumes that
# the wrapper is created directly on the patch object (the
# first argument of f is self). We have to fake that here.
# Otherwise, there will be an error, because an argument is
# missing:
# TypeError f() takes exactly 5 arguments (4 given)
setattr(self.__class__, 'wrapper', f['wrapper'])
patch_function(mod, f['function'], self.wrapper)
else:
if option_set('general', 'debug'):
print('Ignoring {} for: {}'.format(f['type'],
self.modulename))
return mod
def _ignore(self, f):
root_modulename = self.modulename.split('.')[0]
opt = 'ignored {}s'.format(f['type'])
return option_set(opt, root_modulename) or option_set(opt, 'all')
class WrapperList(object):
"""A store for functions and their wrappers.
"""
def __init__(self):
self.functions = []
def add(self, function_names, log_function, arg_loc, modname,
function_type):
if isinstance(function_names, six.string_types):
function_names = [function_names]
wrapper = create_wrapper(log_function, arg_loc, modname)
for f in function_names:
self.functions.append({'function': f,
'wrapper': wrapper,
'type': function_type})
def add_inputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'input')
def add_outputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'output')
|
<commit_before><commit_msg>Add new patch type that allows for multiple wrappers
PatchSimple allows for exactly one wrapper for input and output functions
respectively. For xarray output function save_mfdataset, the default wrapper
which logs argument 0 does not work, because it should be argument 1
that is logged. So, a second wrapper is required to be able to log this
function. Therefore a new type of patch was added.<commit_after>import six
from .PatchImporter import PatchImporter
from recipyCommon.utils import patch_function, create_wrapper
from recipyCommon.config import option_set
class PatchMultipleWrappers(PatchImporter):
"""Sublass of PatchImporter that allows patching input and output functions
using more than two wrappers.
This class should not be used directly, but subclasses which set the
following class attributes should be created:
* modulename (str)
* wrappers (WrapperList)
"""
def patch(self, mod):
"""Do the patching of `input_functions` and `output_functions`
in `mod` using `input_wrapper` and `output_wrapper` respectively.
"""
print(self.__class__)
for f in self.wrappers.functions:
if not self._ignore(f):
if option_set('general', 'debug'):
msg = 'Patching {} function: {}'.format(f['type'],
f['function'])
print(msg)
# The function that is returned by create_wrapper assumes that
# the wrapper is created directly on the patch object (the
# first argument of f is self). We have to fake that here.
# Otherwise, there will be an error, because an argument is
# missing:
# TypeError f() takes exactly 5 arguments (4 given)
setattr(self.__class__, 'wrapper', f['wrapper'])
patch_function(mod, f['function'], self.wrapper)
else:
if option_set('general', 'debug'):
print('Ignoring {} for: {}'.format(f['type'],
self.modulename))
return mod
def _ignore(self, f):
root_modulename = self.modulename.split('.')[0]
opt = 'ignored {}s'.format(f['type'])
return option_set(opt, root_modulename) or option_set(opt, 'all')
class WrapperList(object):
"""A store for functions and their wrappers.
"""
def __init__(self):
self.functions = []
def add(self, function_names, log_function, arg_loc, modname,
function_type):
if isinstance(function_names, six.string_types):
function_names = [function_names]
wrapper = create_wrapper(log_function, arg_loc, modname)
for f in function_names:
self.functions.append({'function': f,
'wrapper': wrapper,
'type': function_type})
def add_inputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'input')
def add_outputs(self, function_names, log_function, arg_loc, modname):
return self.add(function_names, log_function, arg_loc, modname,
'output')
|
|
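A hedged sketch of how a concrete patch could use WrapperList so that xarray's save_mfdataset logs its paths argument (argument 1) while inputs keep the usual argument 0. The subclass name and the import path of the logging helpers are assumptions, not code from the commit.

from .PatchMultipleWrappers import PatchMultipleWrappers, WrapperList
from recipyCommon.log import log_input, log_output  # assumed helper location

class PatchXarray(PatchMultipleWrappers):
    modulename = 'xarray'

    wrappers = WrapperList()
    # open_dataset receives the input file as argument 0.
    wrappers.add_inputs('open_dataset', log_input, 0, 'xarray')
    # save_mfdataset receives its output paths as argument 1, which is exactly
    # why a separately configured wrapper is needed here.
    wrappers.add_outputs('save_mfdataset', log_output, 1, 'xarray')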
25cf11421fc3f123e4d623f9736867b3800412ba
|
python3_tools/get_edx_webservices.py
|
python3_tools/get_edx_webservices.py
|
import github
from get_repos import *
webservices = []
for repo in expanded_repos_list(orgs):
try:
metadata = get_remote_yaml(repo, 'openedx.yaml')
except github.GithubException:
continue
if 'tags' in metadata and 'webservice' in metadata['tags']:
print("{}".format(repo.html_url))
webservices.append(repo)
|
Add tooling to get all of edx's web services.
|
Add tooling to get all of edx's web services.
|
Python
|
apache-2.0
|
edx/repo-tools,edx/repo-tools
|
Add tooling to get all of edx's web services.
|
import github
from get_repos import *
webservices = []
for repo in expanded_repos_list(orgs):
try:
metadata = get_remote_yaml(repo, 'openedx.yaml')
except github.GithubException:
continue
if 'tags' in metadata and 'webservice' in metadata['tags']:
print("{}".format(repo.html_url))
webservices.append(repo)
|
<commit_before><commit_msg>Add tooling to get all of edx's web services.<commit_after>
|
import github
from get_repos import *
webservices = []
for repo in expanded_repos_list(orgs):
try:
metadata = get_remote_yaml(repo, 'openedx.yaml')
except github.GithubException:
continue
if 'tags' in metadata and 'webservice' in metadata['tags']:
print("{}".format(repo.html_url))
webservices.append(repo)
|
Add tooling to get all of edx's web services.import github
from get_repos import *
webservices = []
for repo in expanded_repos_list(orgs):
try:
metadata = get_remote_yaml(repo, 'openedx.yaml')
except github.GithubException:
continue
if 'tags' in metadata and 'webservice' in metadata['tags']:
print("{}".format(repo.html_url))
webservices.append(repo)
|
<commit_before><commit_msg>Add tooling to get all of edx's web services.<commit_after>import github
from get_repos import *
webservices = []
for repo in expanded_repos_list(orgs):
try:
metadata = get_remote_yaml(repo, 'openedx.yaml')
except github.GithubException:
continue
if 'tags' in metadata and 'webservice' in metadata['tags']:
print("{}".format(repo.html_url))
webservices.append(repo)
|
|
1ced12173272a670b001cf17aa0beb08bae2eb8a
|
scipy/fftpack/realtransforms.py
|
scipy/fftpack/realtransforms.py
|
"""
Real spectrum transforms (DCT, DST, MDCT)
"""
__all__ = ['dct1', 'dct2']
import numpy as np
from scipy.fftpack import _fftpack
import atexit
atexit.register(_fftpack.destroy_dct1_cache)
atexit.register(_fftpack.destroy_dct2_cache)
def dct1(x, n=None):
"""
Return Discrete Cosine Transform (type I) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 1, n)
def dct2(x, n=None):
"""
Return Discrete Cosine Transform (type II) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 2, n)
def _dct(x, type, n=None, axis=-1, overwrite_x=0):
"""
Return Discrete Cosine Transform of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
axis : int, optional
Axis along which the dct is computed. (default=-1)
overwrite_x : bool, optional
If True the contents of x can be destroyed. (default=False)
Returns
-------
z : real ndarray
"""
tmp = np.asarray(x)
if not np.isrealobj(tmp):
raise TypeError,"1st argument must be real sequence"
if n is None:
n = tmp.shape[axis]
else:
raise NotImplemented("Padding/truncating not yet implemented")
if type == 1:
f = _fftpack.dct1
elif type == 2:
f = _fftpack.dct2
else:
raise ValueError("Type %d not understood" % type)
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, 0, overwrite_x)
else:
raise NotImplementedError("Axis arg not yet implemented")
#tmp = swapaxes(tmp, axis, -1)
#tmp = work_function(tmp,n,1,0,overwrite_x)
#return swapaxes(tmp, axis, -1)
|
Add python wrapper around fftpack dct.
|
Add python wrapper around fftpack dct.
|
Python
|
bsd-3-clause
|
ilayn/scipy,ortylp/scipy,kalvdans/scipy,perimosocordiae/scipy,surhudm/scipy,witcxc/scipy,befelix/scipy,pbrod/scipy,minhlongdo/scipy,efiring/scipy,sauliusl/scipy,vanpact/scipy,cpaulik/scipy,sriki18/scipy,endolith/scipy,jjhelmus/scipy,giorgiop/scipy,vigna/scipy,apbard/scipy,WarrenWeckesser/scipy,larsmans/scipy,woodscn/scipy,zaxliu/scipy,anielsen001/scipy,newemailjdm/scipy,lukauskas/scipy,matthew-brett/scipy,mikebenfield/scipy,Srisai85/scipy,perimosocordiae/scipy,sonnyhu/scipy,WarrenWeckesser/scipy,aarchiba/scipy,andyfaff/scipy,trankmichael/scipy,anntzer/scipy,ortylp/scipy,kleskjr/scipy,jamestwebber/scipy,fredrikw/scipy,anntzer/scipy,ogrisel/scipy,mikebenfield/scipy,sargas/scipy,ChanderG/scipy,Eric89GXL/scipy,witcxc/scipy,nonhermitian/scipy,gfyoung/scipy,maniteja123/scipy,josephcslater/scipy,richardotis/scipy,gfyoung/scipy,zaxliu/scipy,felipebetancur/scipy,pyramania/scipy,richardotis/scipy,ilayn/scipy,niknow/scipy,richardotis/scipy,mikebenfield/scipy,Stefan-Endres/scipy,vanpact/scipy,befelix/scipy,sauliusl/scipy,pschella/scipy,ilayn/scipy,piyush0609/scipy,mdhaber/scipy,richardotis/scipy,gef756/scipy,jjhelmus/scipy,Srisai85/scipy,newemailjdm/scipy,anntzer/scipy,pnedunuri/scipy,futurulus/scipy,minhlongdo/scipy,tylerjereddy/scipy,fernand/scipy,mtrbean/scipy,zerothi/scipy,gdooper/scipy,Newman101/scipy,trankmichael/scipy,njwilson23/scipy,nvoron23/scipy,woodscn/scipy,Dapid/scipy,vberaudi/scipy,rmcgibbo/scipy,jor-/scipy,FRidh/scipy,gdooper/scipy,WarrenWeckesser/scipy,andim/scipy,person142/scipy,gertingold/scipy,Eric89GXL/scipy,apbard/scipy,Srisai85/scipy,woodscn/scipy,Srisai85/scipy,lhilt/scipy,andyfaff/scipy,e-q/scipy,kleskjr/scipy,sargas/scipy,argriffing/scipy,Gillu13/scipy,sonnyhu/scipy,zaxliu/scipy,anielsen001/scipy,tylerjereddy/scipy,argriffing/scipy,josephcslater/scipy,WillieMaddox/scipy,giorgiop/scipy,vhaasteren/scipy,nonhermitian/scipy,jseabold/scipy,perimosocordiae/scipy,Stefan-Endres/scipy,kalvdans/scipy,chatcannon/scipy,e-q/scipy,surhudm/scipy,dch312/scipy,aman-iitj/scipy,jsilter/scipy,sonnyhu/scipy,mortonjt/scipy,grlee77/scipy,mtrbean/scipy,pyramania/scipy,niknow/scipy,rgommers/scipy,aman-iitj/scipy,kalvdans/scipy,tylerjereddy/scipy,witcxc/scipy,dominicelse/scipy,Shaswat27/scipy,gef756/scipy,pbrod/scipy,ndchorley/scipy,juliantaylor/scipy,rmcgibbo/scipy,trankmichael/scipy,vhaasteren/scipy,efiring/scipy,ChanderG/scipy,Newman101/scipy,witcxc/scipy,jseabold/scipy,jakevdp/scipy,WarrenWeckesser/scipy,dch312/scipy,pizzathief/scipy,njwilson23/scipy,arokem/scipy,lhilt/scipy,fernand/scipy,zxsted/scipy,trankmichael/scipy,WillieMaddox/scipy,Dapid/scipy,behzadnouri/scipy,gertingold/scipy,arokem/scipy,rmcgibbo/scipy,andyfaff/scipy,raoulbq/scipy,dch312/scipy,fernand/scipy,felipebetancur/scipy,chatcannon/scipy,Shaswat27/scipy,vhaasteren/scipy,mortada/scipy,juliantaylor/scipy,dominicelse/scipy,haudren/scipy,lukauskas/scipy,andim/scipy,pnedunuri/scipy,jseabold/scipy,mdhaber/scipy,josephcslater/scipy,WarrenWeckesser/scipy,fredrikw/scipy,haudren/scipy,futurulus/scipy,haudren/scipy,aeklant/scipy,mtrbean/scipy,mgaitan/scipy,lukauskas/scipy,Kamp9/scipy,endolith/scipy,Srisai85/scipy,cpaulik/scipy,jjhelmus/scipy,mhogg/scipy,vanpact/scipy,mgaitan/scipy,scipy/scipy,zxsted/scipy,nmayorov/scipy,gfyoung/scipy,ndchorley/scipy,jseabold/scipy,andyfaff/scipy,Shaswat27/scipy,lukauskas/scipy,jonycgn/scipy,perimosocordiae/scipy,matthewalbani/scipy,Kamp9/scipy,sriki18/scipy,Dapid/scipy,jamestwebber/scipy,maniteja123/scipy,jseabold/scipy,Gillu13/scipy,Kamp9/scipy,scipy/scipy,efiring/scipy,arokem/scipy,FRidh/scipy,gfyoung/scipy
,dominicelse/scipy,mhogg/scipy,zaxliu/scipy,jonycgn/scipy,jonycgn/scipy,fredrikw/scipy,Srisai85/scipy,Stefan-Endres/scipy,pschella/scipy,mgaitan/scipy,maciejkula/scipy,ales-erjavec/scipy,aeklant/scipy,piyush0609/scipy,bkendzior/scipy,nmayorov/scipy,hainm/scipy,matthew-brett/scipy,lhilt/scipy,lukauskas/scipy,fredrikw/scipy,sauliusl/scipy,vhaasteren/scipy,raoulbq/scipy,vanpact/scipy,dch312/scipy,zxsted/scipy,mortonjt/scipy,pizzathief/scipy,pschella/scipy,raoulbq/scipy,surhudm/scipy,nvoron23/scipy,kleskjr/scipy,vanpact/scipy,efiring/scipy,Gillu13/scipy,perimosocordiae/scipy,WarrenWeckesser/scipy,jakevdp/scipy,aeklant/scipy,fernand/scipy,gef756/scipy,vhaasteren/scipy,aman-iitj/scipy,andim/scipy,maniteja123/scipy,teoliphant/scipy,haudren/scipy,Shaswat27/scipy,sonnyhu/scipy,jor-/scipy,grlee77/scipy,kalvdans/scipy,teoliphant/scipy,aeklant/scipy,mortada/scipy,zerothi/scipy,endolith/scipy,mhogg/scipy,rmcgibbo/scipy,nmayorov/scipy,maciejkula/scipy,gef756/scipy,Dapid/scipy,maniteja123/scipy,mdhaber/scipy,jonycgn/scipy,mgaitan/scipy,mhogg/scipy,pyramania/scipy,niknow/scipy,mdhaber/scipy,nonhermitian/scipy,chatcannon/scipy,zerothi/scipy,anielsen001/scipy,mhogg/scipy,ChanderG/scipy,ogrisel/scipy,surhudm/scipy,minhlongdo/scipy,andim/scipy,mortonjt/scipy,pizzathief/scipy,argriffing/scipy,futurulus/scipy,niknow/scipy,woodscn/scipy,gef756/scipy,ales-erjavec/scipy,pizzathief/scipy,chatcannon/scipy,piyush0609/scipy,sauliusl/scipy,raoulbq/scipy,bkendzior/scipy,apbard/scipy,scipy/scipy,Dapid/scipy,Gillu13/scipy,teoliphant/scipy,niknow/scipy,ogrisel/scipy,petebachant/scipy,befelix/scipy,minhlongdo/scipy,tylerjereddy/scipy,WillieMaddox/scipy,sargas/scipy,fernand/scipy,anntzer/scipy,Gillu13/scipy,nvoron23/scipy,haudren/scipy,woodscn/scipy,jsilter/scipy,Eric89GXL/scipy,ales-erjavec/scipy,petebachant/scipy,haudren/scipy,bkendzior/scipy,vberaudi/scipy,lhilt/scipy,mgaitan/scipy,newemailjdm/scipy,behzadnouri/scipy,pschella/scipy,trankmichael/scipy,Newman101/scipy,vberaudi/scipy,njwilson23/scipy,cpaulik/scipy,vigna/scipy,mingwpy/scipy,rmcgibbo/scipy,sargas/scipy,behzadnouri/scipy,Stefan-Endres/scipy,jakevdp/scipy,mingwpy/scipy,surhudm/scipy,kleskjr/scipy,zaxliu/scipy,aarchiba/scipy,pyramania/scipy,newemailjdm/scipy,jamestwebber/scipy,Newman101/scipy,ortylp/scipy,aarchiba/scipy,zxsted/scipy,cpaulik/scipy,maciejkula/scipy,sriki18/scipy,mtrbean/scipy,anielsen001/scipy,pnedunuri/scipy,ogrisel/scipy,sauliusl/scipy,behzadnouri/scipy,jamestwebber/scipy,rmcgibbo/scipy,jor-/scipy,jjhelmus/scipy,bkendzior/scipy,WillieMaddox/scipy,aman-iitj/scipy,giorgiop/scipy,mtrbean/scipy,zaxliu/scipy,woodscn/scipy,mortonjt/scipy,hainm/scipy,cpaulik/scipy,jsilter/scipy,e-q/scipy,person142/scipy,e-q/scipy,zxsted/scipy,ortylp/scipy,matthew-brett/scipy,gfyoung/scipy,njwilson23/scipy,niknow/scipy,larsmans/scipy,raoulbq/scipy,gdooper/scipy,zxsted/scipy,endolith/scipy,andim/scipy,ilayn/scipy,ogrisel/scipy,larsmans/scipy,mingwpy/scipy,kleskjr/scipy,behzadnouri/scipy,hainm/scipy,mingwpy/scipy,mortonjt/scipy,WillieMaddox/scipy,futurulus/scipy,matthewalbani/scipy,nonhermitian/scipy,felipebetancur/scipy,jseabold/scipy,juliantaylor/scipy,sonnyhu/scipy,kleskjr/scipy,ales-erjavec/scipy,ales-erjavec/scipy,Kamp9/scipy,jamestwebber/scipy,rgommers/scipy,vigna/scipy,minhlongdo/scipy,futurulus/scipy,jonycgn/scipy,vigna/scipy,tylerjereddy/scipy,vanpact/scipy,piyush0609/scipy,petebachant/scipy,larsmans/scipy,befelix/scipy,mikebenfield/scipy,ilayn/scipy,scipy/scipy,sriki18/scipy,jor-/scipy,person142/scipy,newemailjdm/scipy,grlee77/scipy,jonycgn/scipy,pnedunuri/scipy,a
man-iitj/scipy,trankmichael/scipy,argriffing/scipy,andim/scipy,behzadnouri/scipy,mortonjt/scipy,pbrod/scipy,efiring/scipy,ortylp/scipy,maciejkula/scipy,scipy/scipy,nvoron23/scipy,arokem/scipy,person142/scipy,sriki18/scipy,felipebetancur/scipy,mikebenfield/scipy,petebachant/scipy,gertingold/scipy,matthewalbani/scipy,cpaulik/scipy,sriki18/scipy,maciejkula/scipy,bkendzior/scipy,andyfaff/scipy,zerothi/scipy,fredrikw/scipy,pyramania/scipy,teoliphant/scipy,e-q/scipy,witcxc/scipy,argriffing/scipy,Shaswat27/scipy,juliantaylor/scipy,gdooper/scipy,petebachant/scipy,Gillu13/scipy,teoliphant/scipy,endolith/scipy,gdooper/scipy,vigna/scipy,vberaudi/scipy,rgommers/scipy,grlee77/scipy,nvoron23/scipy,ChanderG/scipy,mortada/scipy,dominicelse/scipy,matthew-brett/scipy,ChanderG/scipy,ndchorley/scipy,nmayorov/scipy,mgaitan/scipy,gertingold/scipy,efiring/scipy,vberaudi/scipy,FRidh/scipy,pnedunuri/scipy,ndchorley/scipy,maniteja123/scipy,dch312/scipy,mdhaber/scipy,ndchorley/scipy,gef756/scipy,hainm/scipy,dominicelse/scipy,vberaudi/scipy,mortada/scipy,hainm/scipy,pnedunuri/scipy,piyush0609/scipy,larsmans/scipy,Kamp9/scipy,Dapid/scipy,aman-iitj/scipy,raoulbq/scipy,mtrbean/scipy,scipy/scipy,ales-erjavec/scipy,fernand/scipy,matthewalbani/scipy,Kamp9/scipy,futurulus/scipy,gertingold/scipy,endolith/scipy,mingwpy/scipy,piyush0609/scipy,nmayorov/scipy,surhudm/scipy,kalvdans/scipy,Eric89GXL/scipy,jsilter/scipy,josephcslater/scipy,ortylp/scipy,aarchiba/scipy,newemailjdm/scipy,mhogg/scipy,mdhaber/scipy,mortada/scipy,befelix/scipy,apbard/scipy,Shaswat27/scipy,jsilter/scipy,josephcslater/scipy,fredrikw/scipy,felipebetancur/scipy,jor-/scipy,anielsen001/scipy,njwilson23/scipy,ilayn/scipy,lhilt/scipy,Stefan-Endres/scipy,ndchorley/scipy,richardotis/scipy,matthew-brett/scipy,lukauskas/scipy,arokem/scipy,pbrod/scipy,Eric89GXL/scipy,chatcannon/scipy,sargas/scipy,pbrod/scipy,minhlongdo/scipy,giorgiop/scipy,aeklant/scipy,grlee77/scipy,sauliusl/scipy,WillieMaddox/scipy,mingwpy/scipy,Newman101/scipy,hainm/scipy,FRidh/scipy,anntzer/scipy,larsmans/scipy,mortada/scipy,rgommers/scipy,giorgiop/scipy,andyfaff/scipy,zerothi/scipy,juliantaylor/scipy,njwilson23/scipy,ChanderG/scipy,FRidh/scipy,jakevdp/scipy,vhaasteren/scipy,giorgiop/scipy,perimosocordiae/scipy,Eric89GXL/scipy,chatcannon/scipy,FRidh/scipy,pschella/scipy,anntzer/scipy,apbard/scipy,pbrod/scipy,maniteja123/scipy,felipebetancur/scipy,Newman101/scipy,rgommers/scipy,zerothi/scipy,petebachant/scipy,jakevdp/scipy,jjhelmus/scipy,sonnyhu/scipy,anielsen001/scipy,pizzathief/scipy,nonhermitian/scipy,nvoron23/scipy,person142/scipy,aarchiba/scipy,richardotis/scipy,matthewalbani/scipy,Stefan-Endres/scipy,argriffing/scipy
|
Add python wrapper around fftpack dct.
|
"""
Real spectrum transforms (DCT, DST, MDCT)
"""
__all__ = ['dct1', 'dct2']
import numpy as np
from scipy.fftpack import _fftpack
import atexit
atexit.register(_fftpack.destroy_dct1_cache)
atexit.register(_fftpack.destroy_dct2_cache)
def dct1(x, n=None):
"""
Return Discrete Cosine Transform (type I) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 1, n)
def dct2(x, n=None):
"""
Return Discrete Cosine Transform (type II) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 2, n)
def _dct(x, type, n=None, axis=-1, overwrite_x=0):
"""
Return Discrete Cosine Transform of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
axis : int, optional
Axis along which the dct is computed. (default=-1)
overwrite_x : bool, optional
If True the contents of x can be destroyed. (default=False)
Returns
-------
z : real ndarray
"""
tmp = np.asarray(x)
if not np.isrealobj(tmp):
raise TypeError,"1st argument must be real sequence"
if n is None:
n = tmp.shape[axis]
else:
raise NotImplemented("Padding/truncating not yet implemented")
if type == 1:
f = _fftpack.dct1
elif type == 2:
f = _fftpack.dct2
else:
raise ValueError("Type %d not understood" % type)
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, 0, overwrite_x)
else:
raise NotImplementedError("Axis arg not yet implemented")
#tmp = swapaxes(tmp, axis, -1)
#tmp = work_function(tmp,n,1,0,overwrite_x)
#return swapaxes(tmp, axis, -1)
|
<commit_before><commit_msg>Add python wrapper around fftpack dct.<commit_after>
|
"""
Real spectrum transforms (DCT, DST, MDCT)
"""
__all__ = ['dct1', 'dct2']
import numpy as np
from scipy.fftpack import _fftpack
import atexit
atexit.register(_fftpack.destroy_dct1_cache)
atexit.register(_fftpack.destroy_dct2_cache)
def dct1(x, n=None):
"""
Return Discrete Cosine Transform (type I) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 1, n)
def dct2(x, n=None):
"""
Return Discrete Cosine Transform (type II) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 2, n)
def _dct(x, type, n=None, axis=-1, overwrite_x=0):
"""
Return Discrete Cosine Transform of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
axis : int, optional
Axis along which the dct is computed. (default=-1)
overwrite_x : bool, optional
If True the contents of x can be destroyed. (default=False)
Returns
-------
z : real ndarray
"""
tmp = np.asarray(x)
if not np.isrealobj(tmp):
raise TypeError,"1st argument must be real sequence"
if n is None:
n = tmp.shape[axis]
else:
raise NotImplemented("Padding/truncating not yet implemented")
if type == 1:
f = _fftpack.dct1
elif type == 2:
f = _fftpack.dct2
else:
raise ValueError("Type %d not understood" % type)
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, 0, overwrite_x)
else:
raise NotImplementedError("Axis arg not yet implemented")
#tmp = swapaxes(tmp, axis, -1)
#tmp = work_function(tmp,n,1,0,overwrite_x)
#return swapaxes(tmp, axis, -1)
|
Add python wrapper around fftpack dct."""
Real spectrum transforms (DCT, DST, MDCT)
"""
__all__ = ['dct1', 'dct2']
import numpy as np
from scipy.fftpack import _fftpack
import atexit
atexit.register(_fftpack.destroy_dct1_cache)
atexit.register(_fftpack.destroy_dct2_cache)
def dct1(x, n=None):
"""
Return Discrete Cosine Transform (type I) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 1, n)
def dct2(x, n=None):
"""
Return Discrete Cosine Transform (type II) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 2, n)
def _dct(x, type, n=None, axis=-1, overwrite_x=0):
"""
Return Discrete Cosine Transform of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
axis : int, optional
Axis along which the dct is computed. (default=-1)
overwrite_x : bool, optional
If True the contents of x can be destroyed. (default=False)
Returns
-------
z : real ndarray
"""
tmp = np.asarray(x)
if not np.isrealobj(tmp):
raise TypeError,"1st argument must be real sequence"
if n is None:
n = tmp.shape[axis]
else:
raise NotImplemented("Padding/truncating not yet implemented")
if type == 1:
f = _fftpack.dct1
elif type == 2:
f = _fftpack.dct2
else:
raise ValueError("Type %d not understood" % type)
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, 0, overwrite_x)
else:
raise NotImplementedError("Axis arg not yet implemented")
#tmp = swapaxes(tmp, axis, -1)
#tmp = work_function(tmp,n,1,0,overwrite_x)
#return swapaxes(tmp, axis, -1)
|
<commit_before><commit_msg>Add python wrapper around fftpack dct.<commit_after>"""
Real spectrum transforms (DCT, DST, MDCT)
"""
__all__ = ['dct1', 'dct2']
import numpy as np
from scipy.fftpack import _fftpack
import atexit
atexit.register(_fftpack.destroy_dct1_cache)
atexit.register(_fftpack.destroy_dct2_cache)
def dct1(x, n=None):
"""
Return Discrete Cosine Transform (type I) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 1, n)
def dct2(x, n=None):
"""
Return Discrete Cosine Transform (type II) of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
Returns
-------
y : real ndarray
"""
return _dct(x, 2, n)
def _dct(x, type, n=None, axis=-1, overwrite_x=0):
"""
Return Discrete Cosine Transform of arbitrary type sequence x.
Parameters
----------
x : array-like
input array.
n : int, optional
Length of the transform.
axis : int, optional
Axis along which the dct is computed. (default=-1)
overwrite_x : bool, optional
If True the contents of x can be destroyed. (default=False)
Returns
-------
z : real ndarray
"""
tmp = np.asarray(x)
if not np.isrealobj(tmp):
raise TypeError,"1st argument must be real sequence"
if n is None:
n = tmp.shape[axis]
else:
raise NotImplemented("Padding/truncating not yet implemented")
if type == 1:
f = _fftpack.dct1
elif type == 2:
f = _fftpack.dct2
else:
raise ValueError("Type %d not understood" % type)
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, 0, overwrite_x)
else:
raise NotImplementedError("Axis arg not yet implemented")
#tmp = swapaxes(tmp, axis, -1)
#tmp = work_function(tmp,n,1,0,overwrite_x)
#return swapaxes(tmp, axis, -1)
|
|
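A small usage sketch for the wrappers above, assuming the module is importable as scipy.fftpack.realtransforms as the file path suggests; the input values are illustrative only.

import numpy as np
from scipy.fftpack.realtransforms import dct1, dct2

x = np.array([4.0, 3.0, 5.0, 10.0])
y1 = dct1(x)   # type-I DCT over the last axis
y2 = dct2(x)   # type-II DCT, the transform usually meant by "the DCT"
print(y1)
print(y2)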
d215b6d00d06ffa9917b7bdf8323f33d83e35cf7
|
src/waldur_mastermind/support/management/commands/switching_backend_server.py
|
src/waldur_mastermind/support/management/commands/switching_backend_server.py
|
from django.core.management.base import BaseCommand
from waldur_mastermind.support import backend as support_backend
from ... import models
class Command(BaseCommand):
help = ("Backend data update if a server was switched.")
def handle(self, *args, **options):
models.RequestType.objects.all().delete()
backend = support_backend.get_active_backend()
backend.pull_request_types()
for support_customer in models.SupportCustomer.objects.all():
exists_user = backend.manager.search_users(support_customer.user.email)
if exists_user:
backend_user = exists_user[0]
else:
backend_user = backend.create_user(support_customer.user)
support_customer.backend_id = backend_user.key
support_customer.save()
|
Implement management command to switch backend server in JIRA Service Desk plugin.
|
Implement management command to switch backend server in JIRA Service Desk plugin.
|
Python
|
mit
|
opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur
|
Implement management command to switch backend server in JIRA Service Desk plugin.
|
from django.core.management.base import BaseCommand
from waldur_mastermind.support import backend as support_backend
from ... import models
class Command(BaseCommand):
help = ("Backend data update if a server was switched.")
def handle(self, *args, **options):
models.RequestType.objects.all().delete()
backend = support_backend.get_active_backend()
backend.pull_request_types()
for support_customer in models.SupportCustomer.objects.all():
exists_user = backend.manager.search_users(support_customer.user.email)
if exists_user:
backend_user = exists_user[0]
else:
backend_user = backend.create_user(support_customer.user)
support_customer.backend_id = backend_user.key
support_customer.save()
|
<commit_before><commit_msg>Implement management command to switch backend server in JIRA Service Desk plugin.<commit_after>
|
from django.core.management.base import BaseCommand
from waldur_mastermind.support import backend as support_backend
from ... import models
class Command(BaseCommand):
help = ("Backend data update if a server was switched.")
def handle(self, *args, **options):
models.RequestType.objects.all().delete()
backend = support_backend.get_active_backend()
backend.pull_request_types()
for support_customer in models.SupportCustomer.objects.all():
exists_user = backend.manager.search_users(support_customer.user.email)
if exists_user:
backend_user = exists_user[0]
else:
backend_user = backend.create_user(support_customer.user)
support_customer.backend_id = backend_user.key
support_customer.save()
|
Implement management command to switch backend server in JIRA Service Desk plugin.from django.core.management.base import BaseCommand
from waldur_mastermind.support import backend as support_backend
from ... import models
class Command(BaseCommand):
help = ("Backend data update if a server was switched.")
def handle(self, *args, **options):
models.RequestType.objects.all().delete()
backend = support_backend.get_active_backend()
backend.pull_request_types()
for support_customer in models.SupportCustomer.objects.all():
exists_user = backend.manager.search_users(support_customer.user.email)
if exists_user:
backend_user = exists_user[0]
else:
backend_user = backend.create_user(support_customer.user)
support_customer.backend_id = backend_user.key
support_customer.save()
|
<commit_before><commit_msg>Implement management command to switch backend server in JIRA Service Desk plugin.<commit_after>from django.core.management.base import BaseCommand
from waldur_mastermind.support import backend as support_backend
from ... import models
class Command(BaseCommand):
help = ("Backend data update if a server was switched.")
def handle(self, *args, **options):
models.RequestType.objects.all().delete()
backend = support_backend.get_active_backend()
backend.pull_request_types()
for support_customer in models.SupportCustomer.objects.all():
exists_user = backend.manager.search_users(support_customer.user.email)
if exists_user:
backend_user = exists_user[0]
else:
backend_user = backend.create_user(support_customer.user)
support_customer.backend_id = backend_user.key
support_customer.save()
|
|
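For completeness, a hedged sketch of running the management command above programmatically; the command name follows the module file name, and a configured Django settings module is assumed.

from django.core.management import call_command

# Re-pull request types and re-map support customers against the newly
# configured JIRA Service Desk server.
call_command('switching_backend_server')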
4374eb9cf20350f1d5610bfbae358dbb733bb044
|
python/subnets-in-cidr.py
|
python/subnets-in-cidr.py
|
#!/usr/bin/python
import argparse
import netaddr
import os
import infoblox_netmri
parser = argparse.ArgumentParser()
parser.add_argument("cidr")
args = parser.parse_args()
print("Searching for subnets in %s." % args.cidr)
url = os.environ['NETMRI_API_URL']
user = os.environ['NETMRI_USER']
password = os.environ['NETMRI_PASSWORD']
sslverify = os.environ.get('NETMRI_SSLVERIFY')
if sslverify is not None and sslverify.lower() == "false":
sslverify = False
else:
sslverify = True
c = infoblox_netmri.InfobloxNetMRI({
'url': url,
'username': user,
'password': password,
'sslverify': sslverify
})
net = netaddr.IPNetwork(args.cidr)
range = "%s,%s" % (long(net.network), long(net.broadcast))
subnets = c.api_request('subnets/find',
{'op_SubnetIPNumeric': 'between',
'val_c_SubnetIPNumeric': range,
'include': 'vlan'})
print("Found %d subnets in %s." % (subnets['total'], args.cidr))
if subnets['total'] > subnets['current']:
print("Showing first %d subnets found." % subnets['current'])
vlan_map = {}
for v in subnets['vlan']:
vlan_map[v['VlanID']] = (v['VlanIndex'], v['VlanName'])
FORMAT='%20s %8s %20s'
print FORMAT % ('Subnet', 'VLAN ID', 'VLAN Name')
for s in subnets['subnets']:
vlan = vlan_map.get(s['VlanID']) or ('n/a', 'Unknown')
print FORMAT % (s['SubnetCIDR'], vlan[0], vlan[1])
|
Add a sample script for searching subnets in a CIDR
|
Add a sample script for searching subnets in a CIDR
See request in community:
https://community.infoblox.com/t5/Network-Change-Configuration/Anyone-ha
ve-an-Example-NetMRI-REST-call-using-filters-in-Python/m-p/5021#M1430
|
Python
|
mit
|
infobloxopen/netmri-toolkit,infobloxopen/netmri-toolkit,infobloxopen/netmri-toolkit
|
Add a sample script for searching subnets in a CIDR
See request in community:
https://community.infoblox.com/t5/Network-Change-Configuration/Anyone-ha
ve-an-Example-NetMRI-REST-call-using-filters-in-Python/m-p/5021#M1430
|
#!/usr/bin/python
import argparse
import netaddr
import os
import infoblox_netmri
parser = argparse.ArgumentParser()
parser.add_argument("cidr")
args = parser.parse_args()
print("Searching for subnets in %s." % args.cidr)
url = os.environ['NETMRI_API_URL']
user = os.environ['NETMRI_USER']
password = os.environ['NETMRI_PASSWORD']
sslverify = os.environ.get('NETMRI_SSLVERIFY')
if sslverify is not None and sslverify.lower() == "false":
sslverify = False
else:
sslverify = True
c = infoblox_netmri.InfobloxNetMRI({
'url': url,
'username': user,
'password': password,
'sslverify': sslverify
})
net = netaddr.IPNetwork(args.cidr)
range = "%s,%s" % (long(net.network), long(net.broadcast))
subnets = c.api_request('subnets/find',
{'op_SubnetIPNumeric': 'between',
'val_c_SubnetIPNumeric': range,
'include': 'vlan'})
print("Found %d subnets in %s." % (subnets['total'], args.cidr))
if subnets['total'] > subnets['current']:
print("Showing first %d subnets found." % subnets['current'])
vlan_map = {}
for v in subnets['vlan']:
vlan_map[v['VlanID']] = (v['VlanIndex'], v['VlanName'])
FORMAT='%20s %8s %20s'
print FORMAT % ('Subnet', 'VLAN ID', 'VLAN Name')
for s in subnets['subnets']:
vlan = vlan_map.get(s['VlanID']) or ('n/a', 'Unknown')
print FORMAT % (s['SubnetCIDR'], vlan[0], vlan[1])
|
<commit_before><commit_msg>Add a sample script for searching subnets in a CIDR
See request in community:
https://community.infoblox.com/t5/Network-Change-Configuration/Anyone-ha
ve-an-Example-NetMRI-REST-call-using-filters-in-Python/m-p/5021#M1430<commit_after>
|
#!/usr/bin/python
import argparse
import netaddr
import os
import infoblox_netmri
parser = argparse.ArgumentParser()
parser.add_argument("cidr")
args = parser.parse_args()
print("Searching for subnets in %s." % args.cidr)
url = os.environ['NETMRI_API_URL']
user = os.environ['NETMRI_USER']
password = os.environ['NETMRI_PASSWORD']
sslverify = os.environ.get('NETMRI_SSLVERIFY')
if sslverify is not None and sslverify.lower() == "false":
sslverify = False
else:
sslverify = True
c = infoblox_netmri.InfobloxNetMRI({
'url': url,
'username': user,
'password': password,
'sslverify': sslverify
})
net = netaddr.IPNetwork(args.cidr)
range = "%s,%s" % (long(net.network), long(net.broadcast))
subnets = c.api_request('subnets/find',
{'op_SubnetIPNumeric': 'between',
'val_c_SubnetIPNumeric': range,
'include': 'vlan'})
print("Found %d subnets in %s." % (subnets['total'], args.cidr))
if subnets['total'] > subnets['current']:
print("Showing first %d subnets found." % subnets['current'])
vlan_map = {}
for v in subnets['vlan']:
vlan_map[v['VlanID']] = (v['VlanIndex'], v['VlanName'])
FORMAT='%20s %8s %20s'
print FORMAT % ('Subnet', 'VLAN ID', 'VLAN Name')
for s in subnets['subnets']:
vlan = vlan_map.get(s['VlanID']) or ('n/a', 'Unknown')
print FORMAT % (s['SubnetCIDR'], vlan[0], vlan[1])
|
Add a sample script for searching subnets in a CIDR
See request in community:
https://community.infoblox.com/t5/Network-Change-Configuration/Anyone-ha
ve-an-Example-NetMRI-REST-call-using-filters-in-Python/m-p/5021#M1430#!/usr/bin/python
import argparse
import netaddr
import os
import infoblox_netmri
parser = argparse.ArgumentParser()
parser.add_argument("cidr")
args = parser.parse_args()
print("Searching for subnets in %s." % args.cidr)
url = os.environ['NETMRI_API_URL']
user = os.environ['NETMRI_USER']
password = os.environ['NETMRI_PASSWORD']
sslverify = os.environ.get('NETMRI_SSLVERIFY')
if sslverify is not None and sslverify.lower() == "false":
sslverify = False
else:
sslverify = True
c = infoblox_netmri.InfobloxNetMRI({
'url': url,
'username': user,
'password': password,
'sslverify': sslverify
})
net = netaddr.IPNetwork(args.cidr)
range = "%s,%s" % (long(net.network), long(net.broadcast))
subnets = c.api_request('subnets/find',
{'op_SubnetIPNumeric': 'between',
'val_c_SubnetIPNumeric': range,
'include': 'vlan'})
print("Found %d subnets in %s." % (subnets['total'], args.cidr))
if subnets['total'] > subnets['current']:
print("Showing first %d subnets found." % subnets['current'])
vlan_map = {}
for v in subnets['vlan']:
vlan_map[v['VlanID']] = (v['VlanIndex'], v['VlanName'])
FORMAT='%20s %8s %20s'
print FORMAT % ('Subnet', 'VLAN ID', 'VLAN Name')
for s in subnets['subnets']:
vlan = vlan_map.get(s['VlanID']) or ('n/a', 'Unknown')
print FORMAT % (s['SubnetCIDR'], vlan[0], vlan[1])
|
<commit_before><commit_msg>Add a sample script for searching subnets in a CIDR
See request in community:
https://community.infoblox.com/t5/Network-Change-Configuration/Anyone-ha
ve-an-Example-NetMRI-REST-call-using-filters-in-Python/m-p/5021#M1430<commit_after>#!/usr/bin/python
import argparse
import netaddr
import os
import infoblox_netmri
parser = argparse.ArgumentParser()
parser.add_argument("cidr")
args = parser.parse_args()
print("Searching for subnets in %s." % args.cidr)
url = os.environ['NETMRI_API_URL']
user = os.environ['NETMRI_USER']
password = os.environ['NETMRI_PASSWORD']
sslverify = os.environ.get('NETMRI_SSLVERIFY')
if sslverify is not None and sslverify.lower() == "false":
sslverify = False
else:
sslverify = True
c = infoblox_netmri.InfobloxNetMRI({
'url': url,
'username': user,
'password': password,
'sslverify': sslverify
})
net = netaddr.IPNetwork(args.cidr)
range = "%s,%s" % (long(net.network), long(net.broadcast))
subnets = c.api_request('subnets/find',
{'op_SubnetIPNumeric': 'between',
'val_c_SubnetIPNumeric': range,
'include': 'vlan'})
print("Found %d subnets in %s." % (subnets['total'], args.cidr))
if subnets['total'] > subnets['current']:
print("Showing first %d subnets found." % subnets['current'])
vlan_map = {}
for v in subnets['vlan']:
vlan_map[v['VlanID']] = (v['VlanIndex'], v['VlanName'])
FORMAT='%20s %8s %20s'
print FORMAT % ('Subnet', 'VLAN ID', 'VLAN Name')
for s in subnets['subnets']:
vlan = vlan_map.get(s['VlanID']) or ('n/a', 'Unknown')
print FORMAT % (s['SubnetCIDR'], vlan[0], vlan[1])
|
|
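Editor's note: the NetMRI subnet sample in the record above is Python 2 code (long(), print statements). A minimal sketch of the same numeric-range construction under Python 3, assuming only the netaddr package and an example CIDR that is not part of the original script:

# Hypothetical Python 3 adaptation of the range construction shown above.
import netaddr

net = netaddr.IPNetwork("10.0.0.0/16")  # example CIDR, not taken from the record
# int() replaces the Python 2 long() call used in the sample
range_arg = "%s,%s" % (int(net.network), int(net.broadcast))
print("val_c_SubnetIPNumeric would be: %s" % range_arg)

The rest of the script (the api_request call and the VLAN mapping) would be unchanged apart from converting the remaining print statements to print() calls.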
5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec
|
openquake/commands/webui.py
|
openquake/commands/webui.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: bbfc5549fb632d535ed1934e0d2bd1226ccd4507
|
Python
|
agpl-3.0
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: bbfc5549fb632d535ed1934e0d2bd1226ccd4507
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: bbfc5549fb632d535ed1934e0d2bd1226ccd4507<commit_after>
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: bbfc5549fb632d535ed1934e0d2bd1226ccd4507# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: bbfc5549fb632d535ed1934e0d2bd1226ccd4507<commit_after># -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
|
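Editor's note: as the commit message in the record above states, `oq webui start` runs the Django development server in the foreground; the command ultimately shells out to openquake.server.manage. An equivalent standalone invocation, assuming the openquake engine is installed on the current interpreter, is roughly:

# Illustrative only; assumes the openquake package is importable.
import sys
import subprocess

# What webui('start') reduces to once the DbServer status check passes:
subprocess.call([sys.executable, '-m', 'openquake.server.manage', 'runserver'])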
24478a92ef1218ade7867ba5a2f2d8172aea8c64
|
tests/test_load_data.py
|
tests/test_load_data.py
|
import unittest
import sys
sys.path.append('soccer')
import leagueproperties
import leagueids
import main
class TestLoadData(unittest.TestCase):
TEAMS_INFO_FILENAME = "teams.json"
def set_up(self):
pass
def tear_down(self):
pass
def test_load_team_data(self):
raised = False
try:
main.load_json(TestLoadData.TEAMS_INFO_FILENAME)["teams"]
except IOError:
raised = True
self.assertFalse(raised)
def test_load_league_properties(self):
raised = False
try:
league_properties = leagueproperties.LEAGUE_PROPERTIES
league_properties.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
def test_load_league_ids(self):
raised = False
try:
leage_ids = leagueids.LEAGUE_IDS
leage_ids.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
if __name__ == '__main__':
unittest.main()
|
Implement simple unit tests for loading data at startup
|
Implement simple unit tests for loading data at startup
|
Python
|
mit
|
Saturn/soccer-cli,architv/soccer-cli
|
Implement simple unit tests for loading data at startup
|
import unittest
import sys
sys.path.append('soccer')
import leagueproperties
import leagueids
import main
class TestLoadData(unittest.TestCase):
TEAMS_INFO_FILENAME = "teams.json"
def set_up(self):
pass
def tear_down(self):
pass
def test_load_team_data(self):
raised = False
try:
main.load_json(TestLoadData.TEAMS_INFO_FILENAME)["teams"]
except IOError:
raised = True
self.assertFalse(raised)
def test_load_league_properties(self):
raised = False
try:
league_properties = leagueproperties.LEAGUE_PROPERTIES
league_properties.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
def test_load_league_ids(self):
raised = False
try:
leage_ids = leagueids.LEAGUE_IDS
leage_ids.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Implement simple unit tests for loading data at startup<commit_after>
|
import unittest
import sys
sys.path.append('soccer')
import leagueproperties
import leagueids
import main
class TestLoadData(unittest.TestCase):
TEAMS_INFO_FILENAME = "teams.json"
def set_up(self):
pass
def tear_down(self):
pass
def test_load_team_data(self):
raised = False
try:
main.load_json(TestLoadData.TEAMS_INFO_FILENAME)["teams"]
except IOError:
raised = True
self.assertFalse(raised)
def test_load_league_properties(self):
raised = False
try:
league_properties = leagueproperties.LEAGUE_PROPERTIES
league_properties.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
def test_load_league_ids(self):
raised = False
try:
leage_ids = leagueids.LEAGUE_IDS
leage_ids.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
if __name__ == '__main__':
unittest.main()
|
Implement simple unit tests for loading data at startupimport unittest
import sys
sys.path.append('soccer')
import leagueproperties
import leagueids
import main
class TestLoadData(unittest.TestCase):
TEAMS_INFO_FILENAME = "teams.json"
def set_up(self):
pass
def tear_down(self):
pass
def test_load_team_data(self):
raised = False
try:
main.load_json(TestLoadData.TEAMS_INFO_FILENAME)["teams"]
except IOError:
raised = True
self.assertFalse(raised)
def test_load_league_properties(self):
raised = False
try:
league_properties = leagueproperties.LEAGUE_PROPERTIES
league_properties.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
def test_load_league_ids(self):
raised = False
try:
leage_ids = leagueids.LEAGUE_IDS
leage_ids.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Implement simple unit tests for loading data at startup<commit_after>import unittest
import sys
sys.path.append('soccer')
import leagueproperties
import leagueids
import main
class TestLoadData(unittest.TestCase):
TEAMS_INFO_FILENAME = "teams.json"
def set_up(self):
pass
def tear_down(self):
pass
def test_load_team_data(self):
raised = False
try:
main.load_json(TestLoadData.TEAMS_INFO_FILENAME)["teams"]
except IOError:
raised = True
self.assertFalse(raised)
def test_load_league_properties(self):
raised = False
try:
league_properties = leagueproperties.LEAGUE_PROPERTIES
league_properties.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
def test_load_league_ids(self):
raised = False
try:
leage_ids = leagueids.LEAGUE_IDS
leage_ids.keys()
except AttributeError:
raised = True
self.assertFalse(raised)
if __name__ == '__main__':
unittest.main()
|
|
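Editor's note: the tests in the record above use a raised-flag pattern (set a boolean in the except block, then assertFalse). A more direct formulation with self.fail, shown here only as an illustration and assuming the same soccer/ modules are on sys.path, would be:

# Illustrative alternative to the raised-flag pattern; not part of the original commit.
import sys
import unittest

sys.path.append('soccer')

class TestLoadLeagueIdsAlt(unittest.TestCase):
    def test_load_league_ids(self):
        try:
            import leagueids
            leagueids.LEAGUE_IDS.keys()
        except (ImportError, AttributeError) as e:
            self.fail("loading league ids failed: %s" % e)

if __name__ == '__main__':
    unittest.main()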
f72f7252808266b8ee9e3f5f46b3c617a4b1f787
|
app/tests/model_helpers.py
|
app/tests/model_helpers.py
|
import pytest
def test_factory(factory):
try:
factory()
except Exception as e:
pytest.fail(
f"Failed factory initialization for {str(factory)} with exception: {e}"
)
|
Refactor factory testing even more
|
Refactor factory testing even more
|
Python
|
apache-2.0
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
Refactor factory testing even more
|
import pytest
def test_factory(factory):
try:
factory()
except Exception as e:
pytest.fail(
f"Failed factory initialization for {str(factory)} with exception: {e}"
)
|
<commit_before><commit_msg>Refactor factory testing even more<commit_after>
|
import pytest
def test_factory(factory):
try:
factory()
except Exception as e:
pytest.fail(
f"Failed factory initialization for {str(factory)} with exception: {e}"
)
|
Refactor factory testing even moreimport pytest
def test_factory(factory):
try:
factory()
except Exception as e:
pytest.fail(
f"Failed factory initialization for {str(factory)} with exception: {e}"
)
|
<commit_before><commit_msg>Refactor factory testing even more<commit_after>import pytest
def test_factory(factory):
try:
factory()
except Exception as e:
pytest.fail(
f"Failed factory initialization for {str(factory)} with exception: {e}"
)
|
|
ff9519261e73a77733e6b4566dd1666d05bacf3d
|
display.py
|
display.py
|
import Tkinter
import flaggen
class FlagFrame(Tkinter.Frame):
def __init__(self, master, flag, *args, **kwargs):
Tkinter.Frame.__init__(self, master, *args, **kwargs)
self.flag = flag
self.canvas_height = 250
self.canvas_width = 500
self.canvas = Tkinter.Canvas(self, height=self.canvas_height,
width=self.canvas_width)
self.canvas.grid(row=0, column=0)
self._draw()
def _draw(self):
self.canvas.create_rectangle(0, 0, self.canvas_width,
self.canvas_height, fill=self.flag.bg)
|
Add Module for Displaying Flags in Tkinter Window
|
Add Module for Displaying Flags in Tkinter Window
|
Python
|
mit
|
Eylrid/flaggen
|
Add Module for Displaying Flags in Tkinter Window
|
import Tkinter
import flaggen
class FlagFrame(Tkinter.Frame):
def __init__(self, master, flag, *args, **kwargs):
Tkinter.Frame.__init__(self, master, *args, **kwargs)
self.flag = flag
self.canvas_height = 250
self.canvas_width = 500
self.canvas = Tkinter.Canvas(self, height=self.canvas_height,
width=self.canvas_width)
self.canvas.grid(row=0, column=0)
self._draw()
def _draw(self):
self.canvas.create_rectangle(0, 0, self.canvas_width,
self.canvas_height, fill=self.flag.bg)
|
<commit_before><commit_msg>Add Module for Displaying Flags in Tkinter Window<commit_after>
|
import Tkinter
import flaggen
class FlagFrame(Tkinter.Frame):
def __init__(self, master, flag, *args, **kwargs):
Tkinter.Frame.__init__(self, master, *args, **kwargs)
self.flag = flag
self.canvas_height = 250
self.canvas_width = 500
self.canvas = Tkinter.Canvas(self, height=self.canvas_height,
width=self.canvas_width)
self.canvas.grid(row=0, column=0)
self._draw()
def _draw(self):
self.canvas.create_rectangle(0, 0, self.canvas_width,
self.canvas_height, fill=self.flag.bg)
|
Add Module for Displaying Flags in Tkinter Windowimport Tkinter
import flaggen
class FlagFrame(Tkinter.Frame):
def __init__(self, master, flag, *args, **kwargs):
Tkinter.Frame.__init__(self, master, *args, **kwargs)
self.flag = flag
self.canvas_height = 250
self.canvas_width = 500
self.canvas = Tkinter.Canvas(self, height=self.canvas_height,
width=self.canvas_width)
self.canvas.grid(row=0, column=0)
self._draw()
def _draw(self):
self.canvas.create_rectangle(0, 0, self.canvas_width,
self.canvas_height, fill=self.flag.bg)
|
<commit_before><commit_msg>Add Module for Displaying Flags in Tkinter Window<commit_after>import Tkinter
import flaggen
class FlagFrame(Tkinter.Frame):
def __init__(self, master, flag, *args, **kwargs):
Tkinter.Frame.__init__(self, master, *args, **kwargs)
self.flag = flag
self.canvas_height = 250
self.canvas_width = 500
self.canvas = Tkinter.Canvas(self, height=self.canvas_height,
width=self.canvas_width)
self.canvas.grid(row=0, column=0)
self._draw()
def _draw(self):
self.canvas.create_rectangle(0, 0, self.canvas_width,
self.canvas_height, fill=self.flag.bg)
|
|
5d609b104ea892ab96bd72ec6b67148c0b6eb970
|
temba/flows/migrations/0068_fix_empty_flow_starts.py
|
temba/flows/migrations/0068_fix_empty_flow_starts.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
import json
def fix_empty_starts(apps, schema):
empty_actions = ('[{"msg": {"eng": ""}, "type": "reply"}]', '[{"msg": {"base": ""}, "type": "reply"}]')
from temba.flows.models import ActionSet
# find any action sets that have no msg body
empty_actionsets = ActionSet.objects.filter(actions__in=empty_actions).distinct('flow')
for i, actionset in enumerate(empty_actionsets):
flow = actionset.flow
old_def = flow.as_json()
for actionset in flow.action_sets.all():
if actionset.actions in empty_actions:
print "removing: %s" % actionset.as_json()
actionset.delete()
# set our entry uuid to the highest node
highest_action = flow.action_sets.all().order_by('-y').first()
highest_ruleset = flow.rule_sets.all().order_by('-y').first()
entry_uuid = None
if highest_action and highest_ruleset:
if highest_action.y <= highest_ruleset.y:
entry_uuid = highest_action.uuid
else:
entry_uuid = highest_ruleset.uuid
elif highest_action and not highest_ruleset:
entry_uuid = highest_action.uuid
elif highest_ruleset and not highest_action:
entry_uuid = highest_ruleset.uuid
# save our new entry uuid
flow.entry_uuid = entry_uuid
flow.save(update_fields=['entry_uuid'])
print "=" * 50
print json.dumps(old_def, indent=2)
print "-" * 50
print json.dumps(flow.as_json(), indent=2)
# and create our revision
flow.update(flow.as_json())
print "updated %d of %d actionsets" % (i+1, len(empty_actionsets))
class Migration(migrations.Migration):
dependencies = [
('flows', '0067_flowstart_extra'),
]
operations = [
migrations.RunPython(fix_empty_starts)
]
|
Add migration to remove empty actionsets
|
Add migration to remove empty actionsets
|
Python
|
agpl-3.0
|
tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web
|
Add migration to remove empty actionsets
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
import json
def fix_empty_starts(apps, schema):
empty_actions = ('[{"msg": {"eng": ""}, "type": "reply"}]', '[{"msg": {"base": ""}, "type": "reply"}]')
from temba.flows.models import ActionSet
# find any action sets that have no msg body
empty_actionsets = ActionSet.objects.filter(actions__in=empty_actions).distinct('flow')
for i, actionset in enumerate(empty_actionsets):
flow = actionset.flow
old_def = flow.as_json()
for actionset in flow.action_sets.all():
if actionset.actions in empty_actions:
print "removing: %s" % actionset.as_json()
actionset.delete()
# set our entry uuid to the highest node
highest_action = flow.action_sets.all().order_by('-y').first()
highest_ruleset = flow.rule_sets.all().order_by('-y').first()
entry_uuid = None
if highest_action and highest_ruleset:
if highest_action.y <= highest_ruleset.y:
entry_uuid = highest_action.uuid
else:
entry_uuid = highest_ruleset.uuid
elif highest_action and not highest_ruleset:
entry_uuid = highest_action.uuid
elif highest_ruleset and not highest_action:
entry_uuid = highest_ruleset.uuid
# save our new entry uuid
flow.entry_uuid = entry_uuid
flow.save(update_fields=['entry_uuid'])
print "=" * 50
print json.dumps(old_def, indent=2)
print "-" * 50
print json.dumps(flow.as_json(), indent=2)
# and create our revision
flow.update(flow.as_json())
print "updated %d of %d actionsets" % (i+1, len(empty_actionsets))
class Migration(migrations.Migration):
dependencies = [
('flows', '0067_flowstart_extra'),
]
operations = [
migrations.RunPython(fix_empty_starts)
]
|
<commit_before><commit_msg>Add migration to remove empty actionsets<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
import json
def fix_empty_starts(apps, schema):
empty_actions = ('[{"msg": {"eng": ""}, "type": "reply"}]', '[{"msg": {"base": ""}, "type": "reply"}]')
from temba.flows.models import ActionSet
# find any action sets that have no msg body
empty_actionsets = ActionSet.objects.filter(actions__in=empty_actions).distinct('flow')
for i, actionset in enumerate(empty_actionsets):
flow = actionset.flow
old_def = flow.as_json()
for actionset in flow.action_sets.all():
if actionset.actions in empty_actions:
print "removing: %s" % actionset.as_json()
actionset.delete()
# set our entry uuid to the highest node
highest_action = flow.action_sets.all().order_by('-y').first()
highest_ruleset = flow.rule_sets.all().order_by('-y').first()
entry_uuid = None
if highest_action and highest_ruleset:
if highest_action.y <= highest_ruleset.y:
entry_uuid = highest_action.uuid
else:
entry_uuid = highest_ruleset.uuid
elif highest_action and not highest_ruleset:
entry_uuid = highest_action.uuid
elif highest_ruleset and not highest_action:
entry_uuid = highest_ruleset.uuid
# save our new entry uuid
flow.entry_uuid = entry_uuid
flow.save(update_fields=['entry_uuid'])
print "=" * 50
print json.dumps(old_def, indent=2)
print "-" * 50
print json.dumps(flow.as_json(), indent=2)
# and create our revision
flow.update(flow.as_json())
print "updated %d of %d actionsets" % (i+1, len(empty_actionsets))
class Migration(migrations.Migration):
dependencies = [
('flows', '0067_flowstart_extra'),
]
operations = [
migrations.RunPython(fix_empty_starts)
]
|
Add migration to remove empty actionsets# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
import json
def fix_empty_starts(apps, schema):
empty_actions = ('[{"msg": {"eng": ""}, "type": "reply"}]', '[{"msg": {"base": ""}, "type": "reply"}]')
from temba.flows.models import ActionSet
# find any action sets that have no msg body
empty_actionsets = ActionSet.objects.filter(actions__in=empty_actions).distinct('flow')
for i, actionset in enumerate(empty_actionsets):
flow = actionset.flow
old_def = flow.as_json()
for actionset in flow.action_sets.all():
if actionset.actions in empty_actions:
print "removing: %s" % actionset.as_json()
actionset.delete()
# set our entry uuid to the highest node
highest_action = flow.action_sets.all().order_by('-y').first()
highest_ruleset = flow.rule_sets.all().order_by('-y').first()
entry_uuid = None
if highest_action and highest_ruleset:
if highest_action.y <= highest_ruleset.y:
entry_uuid = highest_action.uuid
else:
entry_uuid = highest_ruleset.uuid
elif highest_action and not highest_ruleset:
entry_uuid = highest_action.uuid
elif highest_ruleset and not highest_action:
entry_uuid = highest_ruleset.uuid
# save our new entry uuid
flow.entry_uuid = entry_uuid
flow.save(update_fields=['entry_uuid'])
print "=" * 50
print json.dumps(old_def, indent=2)
print "-" * 50
print json.dumps(flow.as_json(), indent=2)
# and create our revision
flow.update(flow.as_json())
print "updated %d of %d actionsets" % (i+1, len(empty_actionsets))
class Migration(migrations.Migration):
dependencies = [
('flows', '0067_flowstart_extra'),
]
operations = [
migrations.RunPython(fix_empty_starts)
]
|
<commit_before><commit_msg>Add migration to remove empty actionsets<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
import json
def fix_empty_starts(apps, schema):
empty_actions = ('[{"msg": {"eng": ""}, "type": "reply"}]', '[{"msg": {"base": ""}, "type": "reply"}]')
from temba.flows.models import ActionSet
# find any action sets that have no msg body
empty_actionsets = ActionSet.objects.filter(actions__in=empty_actions).distinct('flow')
for i, actionset in enumerate(empty_actionsets):
flow = actionset.flow
old_def = flow.as_json()
for actionset in flow.action_sets.all():
if actionset.actions in empty_actions:
print "removing: %s" % actionset.as_json()
actionset.delete()
# set our entry uuid to the highest node
highest_action = flow.action_sets.all().order_by('-y').first()
highest_ruleset = flow.rule_sets.all().order_by('-y').first()
entry_uuid = None
if highest_action and highest_ruleset:
if highest_action.y <= highest_ruleset.y:
entry_uuid = highest_action.uuid
else:
entry_uuid = highest_ruleset.uuid
elif highest_action and not highest_ruleset:
entry_uuid = highest_action.uuid
elif highest_ruleset and not highest_action:
entry_uuid = highest_ruleset.uuid
# save our new entry uuid
flow.entry_uuid = entry_uuid
flow.save(update_fields=['entry_uuid'])
print "=" * 50
print json.dumps(old_def, indent=2)
print "-" * 50
print json.dumps(flow.as_json(), indent=2)
# and create our revision
flow.update(flow.as_json())
print "updated %d of %d actionsets" % (i+1, len(empty_actionsets))
class Migration(migrations.Migration):
dependencies = [
('flows', '0067_flowstart_extra'),
]
operations = [
migrations.RunPython(fix_empty_starts)
]
|
|
46852d7ed9a4e03084cbd2c3d296d10946e5190f
|
ceph_deploy/tests/test_cli_rgw.py
|
ceph_deploy/tests/test_cli_rgw.py
|
import pytest
import subprocess
import ceph_deploy.rgw as rgw
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'rgw', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy rgw' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_conf(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'rgw'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy rgw' in result
assert err.value.status == 2
def test_rgw_prefix_auto():
daemon = rgw.colon_separated("hostname")
assert daemon == ("hostname", "rgw.hostname")
def test_rgw_prefix_custom():
daemon = rgw.colon_separated("hostname:mydaemon")
assert daemon == ("hostname", "rgw.mydaemon")
|
Add tests for RGW daemon naming
|
Add tests for RGW daemon naming
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
ddiss/ceph-deploy,trhoden/ceph-deploy,shenhequnying/ceph-deploy,ceph/ceph-deploy,imzhulei/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ceph/ceph-deploy,ghxandsky/ceph-deploy,isyippee/ceph-deploy,isyippee/ceph-deploy,imzhulei/ceph-deploy,ddiss/ceph-deploy,alfredodeza/ceph-deploy,branto1/ceph-deploy,branto1/ceph-deploy,Vicente-Cheng/ceph-deploy,codenrhoden/ceph-deploy,zhouyuan/ceph-deploy,ghxandsky/ceph-deploy,SUSE/ceph-deploy,osynge/ceph-deploy,Vicente-Cheng/ceph-deploy,trhoden/ceph-deploy,zhouyuan/ceph-deploy,codenrhoden/ceph-deploy,alfredodeza/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,osynge/ceph-deploy,SUSE/ceph-deploy,shenhequnying/ceph-deploy
|
Add tests for RGW daemon naming
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
import subprocess
import ceph_deploy.rgw as rgw
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'rgw', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy rgw' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_conf(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'rgw'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy rgw' in result
assert err.value.status == 2
def test_rgw_prefix_auto():
daemon = rgw.colon_separated("hostname")
assert daemon == ("hostname", "rgw.hostname")
def test_rgw_prefix_custom():
daemon = rgw.colon_separated("hostname:mydaemon")
assert daemon == ("hostname", "rgw.mydaemon")
|
<commit_before><commit_msg>Add tests for RGW daemon naming
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
import subprocess
import ceph_deploy.rgw as rgw
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'rgw', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy rgw' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_conf(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'rgw'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy rgw' in result
assert err.value.status == 2
def test_rgw_prefix_auto():
daemon = rgw.colon_separated("hostname")
assert daemon == ("hostname", "rgw.hostname")
def test_rgw_prefix_custom():
daemon = rgw.colon_separated("hostname:mydaemon")
assert daemon == ("hostname", "rgw.mydaemon")
|
Add tests for RGW daemon naming
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import pytest
import subprocess
import ceph_deploy.rgw as rgw
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'rgw', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy rgw' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_conf(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'rgw'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy rgw' in result
assert err.value.status == 2
def test_rgw_prefix_auto():
daemon = rgw.colon_separated("hostname")
assert daemon == ("hostname", "rgw.hostname")
def test_rgw_prefix_custom():
daemon = rgw.colon_separated("hostname:mydaemon")
assert daemon == ("hostname", "rgw.mydaemon")
|
<commit_before><commit_msg>Add tests for RGW daemon naming
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
import subprocess
import ceph_deploy.rgw as rgw
def test_help(tmpdir, cli):
with cli(
args=['ceph-deploy', 'rgw', '--help'],
stdout=subprocess.PIPE,
) as p:
result = p.stdout.read()
assert 'usage: ceph-deploy rgw' in result
assert 'positional arguments' in result
assert 'optional arguments' in result
def test_bad_no_conf(tmpdir, cli):
with tmpdir.join('ceph.conf').open('w'):
pass
with pytest.raises(cli.Failed) as err:
with cli(
args=['ceph-deploy', 'rgw'],
stderr=subprocess.PIPE,
) as p:
result = p.stderr.read()
assert 'usage: ceph-deploy rgw' in result
assert err.value.status == 2
def test_rgw_prefix_auto():
daemon = rgw.colon_separated("hostname")
assert daemon == ("hostname", "rgw.hostname")
def test_rgw_prefix_custom():
daemon = rgw.colon_separated("hostname:mydaemon")
assert daemon == ("hostname", "rgw.mydaemon")
|
|
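Editor's note: the two rgw tests in the record above pin down the contract of rgw.colon_separated without showing its body. The real implementation lives in ceph-deploy; a sketch that satisfies exactly those assertions would be:

# Hypothetical sketch consistent with the assertions in the tests above;
# the actual ceph-deploy implementation may differ in detail.
def colon_separated(host_and_daemon):
    parts = host_and_daemon.split(':', 1)
    host = parts[0]
    daemon = parts[1] if len(parts) == 2 else host
    return host, 'rgw.' + daemon

assert colon_separated('hostname') == ('hostname', 'rgw.hostname')
assert colon_separated('hostname:mydaemon') == ('hostname', 'rgw.mydaemon')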
eee6b64a46b24f5297940d5b93201ebd5bd71959
|
cerbero/commands/debugpackages.py
|
cerbero/commands/debugpackages.py
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import collections
import itertools
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument, shell
from cerbero.utils import messages as m
from cerbero.packages.packagesstore import PackagesStore
from cerbero.packages.package import Package
class DebugPackages(Command):
doc = N_('Outputs debug information about package, like duplicates files '
'or files that do not belong to any package')
name = 'debug-packages'
def __init__(self):
Command.__init__(self, [
ArgparseArgument('-e', '--exclude', nargs='*',
help=_('Output directory for the tarball file')),
])
def run(self, config, args):
store = PackagesStore(config)
allfiles = [p.get_files_list() for p in store.get_packages_list() if\
isinstance(p, Package)]
allfiles = list(itertools.chain(*allfiles))
self.find_duplicates(allfiles)
self.find_orphan_files(allfiles, config.prefix, args.exclude)
def find_duplicates(self, allfiles):
count = collections.Counter(allfiles)
duplicates = [x for x in count if count[x] > 1]
if len(duplicates) > 0:
m.message("Found duplicates files in packages:")
m.message("%r" % duplicates)
def find_orphan_files(self, allfiles, prefix, excludes=[]):
cmd = 'find . -type f %s'
exc = map(lambda x: "\\( ! -name '%s' \\)" % x, excludes)
cmd = cmd % ' '.join(exc)
distfiles = shell.check_call(cmd, prefix).split('\n')
# remove './' from the list of files
distfiles = [f[2:] for f in distfiles]
orphan = sorted(list((set(distfiles) - set(allfiles))))
if len(orphan) > 0:
m.message("Found orphan files:")
m.message('\n'.join(orphan))
register_command(DebugPackages)
|
Add command to debug packages
|
Add command to debug packages
|
Python
|
lgpl-2.1
|
jackjansen/cerbero-2013,sdroege/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,flexVDI/cerbero,nzjrs/cerbero,ylatuya/cerbero,brion/cerbero,ikonst/cerbero,ramaxlo/cerbero,EricssonResearch/cerbero,shoreflyer/cerbero,ikonst/cerbero,ylatuya/cerbero,shoreflyer/cerbero,ramaxlo/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,ramaxlo/cerbero,davibe/cerbero,nzjrs/cerbero,jackjansen/cerbero-2013,atsushieno/cerbero,ford-prefect/cerbero,AlertMe/cerbero,cee1/cerbero-mac,shoreflyer/cerbero,nicolewu/cerbero,jackjansen/cerbero,cee1/cerbero-mac,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,OptoFidelity/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,ramaxlo/cerbero,superdump/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,fluendo/cerbero,BigBrother-International/gst-cerbero,BigBrother-International/gst-cerbero,jackjansen/cerbero-2013,nirbheek/cerbero,AlertMe/cerbero,OptoFidelity/cerbero,sdroege/cerbero,multipath-rtp/cerbero,ylatuya/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,atsushieno/cerbero,ford-prefect/cerbero,GStreamer/cerbero,fluendo/cerbero,multipath-rtp/cerbero,brion/cerbero,flexVDI/cerbero,justinjoy/cerbero,atsushieno/cerbero,jackjansen/cerbero-2013,justinjoy/cerbero,davibe/cerbero,GStreamer/cerbero,OptoFidelity/cerbero,sdroege/cerbero,fluendo/cerbero,centricular/cerbero,nirbheek/cerbero-old,multipath-rtp/cerbero,superdump/cerbero,lubosz/cerbero,cee1/cerbero-mac,nzjrs/cerbero,sdroege/cerbero,AlertMe/cerbero,brion/cerbero,GStreamer/cerbero,GStreamer/cerbero,sdroege/cerbero,fluendo/cerbero,atsushieno/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,davibe/cerbero,lubosz/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,nzjrs/cerbero,davibe/cerbero,cee1/cerbero-mac,nirbheek/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,AlertMe/cerbero,BigBrother-International/gst-cerbero,nirbheek/cerbero,nirbheek/cerbero-old,superdump/cerbero,justinjoy/cerbero,lubosz/cerbero,ylatuya/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,ford-prefect/cerbero,BigBrother-International/gst-cerbero,fluendo/cerbero,ikonst/cerbero,superdump/cerbero,flexVDI/cerbero,AlertMe/cerbero,ikonst/cerbero,jackjansen/cerbero,EricssonResearch/cerbero,brion/cerbero,centricular/cerbero,atsushieno/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,centricular/cerbero,flexVDI/cerbero,ikonst/cerbero,BigBrother-International/gst-cerbero,centricular/cerbero,nirbheek/cerbero-old,justinjoy/cerbero,centricular/cerbero,brion/cerbero,nicolewu/cerbero,EricssonResearch/cerbero,EricssonResearch/cerbero,lubosz/cerbero,nirbheek/cerbero,jackjansen/cerbero,shoreflyer/cerbero,ford-prefect/cerbero,OptoFidelity/cerbero,flexVDI/cerbero,shoreflyer/cerbero,EricssonResearch/cerbero,jackjansen/cerbero,nirbheek/cerbero-old,jackjansen/cerbero-2013,multipath-rtp/cerbero,multipath-rtp/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,nzjrs/cerbero,GStreamer/cerbero,ramaxlo/cerbero,nicolewu/cerbero
|
Add command to debug packages
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import collections
import itertools
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument, shell
from cerbero.utils import messages as m
from cerbero.packages.packagesstore import PackagesStore
from cerbero.packages.package import Package
class DebugPackages(Command):
doc = N_('Outputs debug information about package, like duplicates files '
'or files that do not belong to any package')
name = 'debug-packages'
def __init__(self):
Command.__init__(self, [
ArgparseArgument('-e', '--exclude', nargs='*',
help=_('Output directory for the tarball file')),
])
def run(self, config, args):
store = PackagesStore(config)
allfiles = [p.get_files_list() for p in store.get_packages_list() if\
isinstance(p, Package)]
allfiles = list(itertools.chain(*allfiles))
self.find_duplicates(allfiles)
self.find_orphan_files(allfiles, config.prefix, args.exclude)
def find_duplicates(self, allfiles):
count = collections.Counter(allfiles)
duplicates = [x for x in count if count[x] > 1]
if len(duplicates) > 0:
m.message("Found duplicates files in packages:")
m.message("%r" % duplicates)
def find_orphan_files(self, allfiles, prefix, excludes=[]):
cmd = 'find . -type f %s'
exc = map(lambda x: "\\( ! -name '%s' \\)" % x, excludes)
cmd = cmd % ' '.join(exc)
distfiles = shell.check_call(cmd, prefix).split('\n')
# remove './' from the list of files
distfiles = [f[2:] for f in distfiles]
orphan = sorted(list((set(distfiles) - set(allfiles))))
if len(orphan) > 0:
m.message("Found orphan files:")
m.message('\n'.join(orphan))
register_command(DebugPackages)
|
<commit_before><commit_msg>Add command to debug packages<commit_after>
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import collections
import itertools
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument, shell
from cerbero.utils import messages as m
from cerbero.packages.packagesstore import PackagesStore
from cerbero.packages.package import Package
class DebugPackages(Command):
doc = N_('Outputs debug information about package, like duplicates files '
'or files that do not belong to any package')
name = 'debug-packages'
def __init__(self):
Command.__init__(self, [
ArgparseArgument('-e', '--exclude', nargs='*',
help=_('Output directory for the tarball file')),
])
def run(self, config, args):
store = PackagesStore(config)
allfiles = [p.get_files_list() for p in store.get_packages_list() if\
isinstance(p, Package)]
allfiles = list(itertools.chain(*allfiles))
self.find_duplicates(allfiles)
self.find_orphan_files(allfiles, config.prefix, args.exclude)
def find_duplicates(self, allfiles):
count = collections.Counter(allfiles)
duplicates = [x for x in count if count[x] > 1]
if len(duplicates) > 0:
m.message("Found duplicates files in packages:")
m.message("%r" % duplicates)
def find_orphan_files(self, allfiles, prefix, excludes=[]):
cmd = 'find . -type f %s'
exc = map(lambda x: "\\( ! -name '%s' \\)" % x, excludes)
cmd = cmd % ' '.join(exc)
distfiles = shell.check_call(cmd, prefix).split('\n')
# remove './' from the list of files
distfiles = [f[2:] for f in distfiles]
orphan = sorted(list((set(distfiles) - set(allfiles))))
if len(orphan) > 0:
m.message("Found orphan files:")
m.message('\n'.join(orphan))
register_command(DebugPackages)
|
Add command to debug packages# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import collections
import itertools
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument, shell
from cerbero.utils import messages as m
from cerbero.packages.packagesstore import PackagesStore
from cerbero.packages.package import Package
class DebugPackages(Command):
doc = N_('Outputs debug information about package, like duplicates files '
'or files that do not belong to any package')
name = 'debug-packages'
def __init__(self):
Command.__init__(self, [
ArgparseArgument('-e', '--exclude', nargs='*',
help=_('Output directory for the tarball file')),
])
def run(self, config, args):
store = PackagesStore(config)
allfiles = [p.get_files_list() for p in store.get_packages_list() if\
isinstance(p, Package)]
allfiles = list(itertools.chain(*allfiles))
self.find_duplicates(allfiles)
self.find_orphan_files(allfiles, config.prefix, args.exclude)
def find_duplicates(self, allfiles):
count = collections.Counter(allfiles)
duplicates = [x for x in count if count[x] > 1]
if len(duplicates) > 0:
m.message("Found duplicates files in packages:")
m.message("%r" % duplicates)
def find_orphan_files(self, allfiles, prefix, excludes=[]):
cmd = 'find . -type f %s'
exc = map(lambda x: "\\( ! -name '%s' \\)" % x, excludes)
cmd = cmd % ' '.join(exc)
distfiles = shell.check_call(cmd, prefix).split('\n')
# remove './' from the list of files
distfiles = [f[2:] for f in distfiles]
orphan = sorted(list((set(distfiles) - set(allfiles))))
if len(orphan) > 0:
m.message("Found orphan files:")
m.message('\n'.join(orphan))
register_command(DebugPackages)
|
<commit_before><commit_msg>Add command to debug packages<commit_after># cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import collections
import itertools
from cerbero.commands import Command, register_command
from cerbero.utils import _, N_, ArgparseArgument, shell
from cerbero.utils import messages as m
from cerbero.packages.packagesstore import PackagesStore
from cerbero.packages.package import Package
class DebugPackages(Command):
doc = N_('Outputs debug information about package, like duplicates files '
'or files that do not belong to any package')
name = 'debug-packages'
def __init__(self):
Command.__init__(self, [
ArgparseArgument('-e', '--exclude', nargs='*',
help=_('Output directory for the tarball file')),
])
def run(self, config, args):
store = PackagesStore(config)
allfiles = [p.get_files_list() for p in store.get_packages_list() if\
isinstance(p, Package)]
allfiles = list(itertools.chain(*allfiles))
self.find_duplicates(allfiles)
self.find_orphan_files(allfiles, config.prefix, args.exclude)
def find_duplicates(self, allfiles):
count = collections.Counter(allfiles)
duplicates = [x for x in count if count[x] > 1]
if len(duplicates) > 0:
m.message("Found duplicates files in packages:")
m.message("%r" % duplicates)
def find_orphan_files(self, allfiles, prefix, excludes=[]):
cmd = 'find . -type f %s'
exc = map(lambda x: "\\( ! -name '%s' \\)" % x, excludes)
cmd = cmd % ' '.join(exc)
distfiles = shell.check_call(cmd, prefix).split('\n')
# remove './' from the list of files
distfiles = [f[2:] for f in distfiles]
orphan = sorted(list((set(distfiles) - set(allfiles))))
if len(orphan) > 0:
m.message("Found orphan files:")
m.message('\n'.join(orphan))
register_command(DebugPackages)
|
|
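Editor's note: in find_orphan_files above, each --exclude pattern becomes a negated -name clause of a find command. A small, self-contained illustration of what that string building produces (the patterns here are made up for the example):

# Illustrative only: the exclude handling from find_orphan_files, in isolation.
excludes = ['*.pyc', '*.la']
exc = ["\\( ! -name '%s' \\)" % x for x in excludes]
cmd = 'find . -type f %s' % ' '.join(exc)
print(cmd)  # find . -type f \( ! -name '*.pyc' \) \( ! -name '*.la' \)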
96e782563564af0291d8e536a5adc0b20caf1648
|
tests/test_dbgp_eval_property.py
|
tests/test_dbgp_eval_property.py
|
if __name__ == "__main__":
import sys
sys.path.append('../plugin/python/')
import unittest2 as unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
class EvalPropertyTest(unittest.TestCase):
def __get_eval_property(self,xml_string,code,lang):
xml = ET.fromstring(xml_string)
firstnode = xml[0]
return vdebug.dbgp.EvalProperty(firstnode,code,lang)
def test_numeric_keys(self):
prop = self.__get_eval_property(\
"""<?xml version="1.0" encoding="iso-8859-1"?>
<response xmlns="urn:debugger_protocol_v1" xmlns:xdebug="http://xdebug.org/dbgp/xdebug" command="eval" transaction_id="13">
<property
address="140722906708544" type="array"
children="1" numchildren="2" page="0" pagesize="32">
<property
name="0" address="140022315302704"
type="array" children="1" numchildren="1"></property>
<property
name="key" address="140022315307008"
type="array" children="1" numchildren="1"></property>
</property>
</response>
""", '$testarr', 'php')
self.assertEqual(prop.display_name,'$testarr')
self.assertEqual(prop.value,'')
self.assertEqual(prop.type,'array')
self.assertEqual(prop.depth,0)
self.assertTrue(prop.has_children)
self.assertEqual(prop.child_count(),2)
self.assertEqual(prop.children[0].type,'array')
self.assertEqual(prop.children[0].display_name,'$testarr[0]')
self.assertEqual(prop.children[1].type,'array')
self.assertEqual(prop.children[1].display_name,"$testarr['key']")
|
Add failing test for eval'd numeric array keys
|
Add failing test for eval'd numeric array keys
|
Python
|
mit
|
paprykarz/vdebug,joonty/vdebug,joonty/vdebug,Taluu/vdebug,paprykarz/vdebug,joonty/vdebug,Taluu/vdebug,Taluu/vdebug,paprykarz/vdebug
|
Add failing test for eval'd numeric array keys
|
if __name__ == "__main__":
import sys
sys.path.append('../plugin/python/')
import unittest2 as unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
class EvalPropertyTest(unittest.TestCase):
def __get_eval_property(self,xml_string,code,lang):
xml = ET.fromstring(xml_string)
firstnode = xml[0]
return vdebug.dbgp.EvalProperty(firstnode,code,lang)
def test_numeric_keys(self):
prop = self.__get_eval_property(\
"""<?xml version="1.0" encoding="iso-8859-1"?>
<response xmlns="urn:debugger_protocol_v1" xmlns:xdebug="http://xdebug.org/dbgp/xdebug" command="eval" transaction_id="13">
<property
address="140722906708544" type="array"
children="1" numchildren="2" page="0" pagesize="32">
<property
name="0" address="140022315302704"
type="array" children="1" numchildren="1"></property>
<property
name="key" address="140022315307008"
type="array" children="1" numchildren="1"></property>
</property>
</response>
""", '$testarr', 'php')
self.assertEqual(prop.display_name,'$testarr')
self.assertEqual(prop.value,'')
self.assertEqual(prop.type,'array')
self.assertEqual(prop.depth,0)
self.assertTrue(prop.has_children)
self.assertEqual(prop.child_count(),2)
self.assertEqual(prop.children[0].type,'array')
self.assertEqual(prop.children[0].display_name,'$testarr[0]')
self.assertEqual(prop.children[1].type,'array')
self.assertEqual(prop.children[1].display_name,"$testarr['key']")
|
<commit_before><commit_msg>Add failing test for eval'd numeric array keys<commit_after>
|
if __name__ == "__main__":
import sys
sys.path.append('../plugin/python/')
import unittest2 as unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
class EvalPropertyTest(unittest.TestCase):
def __get_eval_property(self,xml_string,code,lang):
xml = ET.fromstring(xml_string)
firstnode = xml[0]
return vdebug.dbgp.EvalProperty(firstnode,code,lang)
def test_numeric_keys(self):
prop = self.__get_eval_property(\
"""<?xml version="1.0" encoding="iso-8859-1"?>
<response xmlns="urn:debugger_protocol_v1" xmlns:xdebug="http://xdebug.org/dbgp/xdebug" command="eval" transaction_id="13">
<property
address="140722906708544" type="array"
children="1" numchildren="2" page="0" pagesize="32">
<property
name="0" address="140022315302704"
type="array" children="1" numchildren="1"></property>
<property
name="key" address="140022315307008"
type="array" children="1" numchildren="1"></property>
</property>
</response>
""", '$testarr', 'php')
self.assertEqual(prop.display_name,'$testarr')
self.assertEqual(prop.value,'')
self.assertEqual(prop.type,'array')
self.assertEqual(prop.depth,0)
self.assertTrue(prop.has_children)
self.assertEqual(prop.child_count(),2)
self.assertEqual(prop.children[0].type,'array')
self.assertEqual(prop.children[0].display_name,'$testarr[0]')
self.assertEqual(prop.children[1].type,'array')
self.assertEqual(prop.children[1].display_name,"$testarr['key']")
|
Add failing test for eval'd numeric array keysif __name__ == "__main__":
import sys
sys.path.append('../plugin/python/')
import unittest2 as unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
class EvalPropertyTest(unittest.TestCase):
def __get_eval_property(self,xml_string,code,lang):
xml = ET.fromstring(xml_string)
firstnode = xml[0]
return vdebug.dbgp.EvalProperty(firstnode,code,lang)
def test_numeric_keys(self):
prop = self.__get_eval_property(\
"""<?xml version="1.0" encoding="iso-8859-1"?>
<response xmlns="urn:debugger_protocol_v1" xmlns:xdebug="http://xdebug.org/dbgp/xdebug" command="eval" transaction_id="13">
<property
address="140722906708544" type="array"
children="1" numchildren="2" page="0" pagesize="32">
<property
name="0" address="140022315302704"
type="array" children="1" numchildren="1"></property>
<property
name="key" address="140022315307008"
type="array" children="1" numchildren="1"></property>
</property>
</response>
""", '$testarr', 'php')
self.assertEqual(prop.display_name,'$testarr')
self.assertEqual(prop.value,'')
self.assertEqual(prop.type,'array')
self.assertEqual(prop.depth,0)
self.assertTrue(prop.has_children)
self.assertEqual(prop.child_count(),2)
self.assertEqual(prop.children[0].type,'array')
self.assertEqual(prop.children[0].display_name,'$testarr[0]')
self.assertEqual(prop.children[1].type,'array')
self.assertEqual(prop.children[1].display_name,"$testarr['key']")
|
<commit_before><commit_msg>Add failing test for eval'd numeric array keys<commit_after>if __name__ == "__main__":
import sys
sys.path.append('../plugin/python/')
import unittest2 as unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
class EvalPropertyTest(unittest.TestCase):
def __get_eval_property(self,xml_string,code,lang):
xml = ET.fromstring(xml_string)
firstnode = xml[0]
return vdebug.dbgp.EvalProperty(firstnode,code,lang)
def test_numeric_keys(self):
prop = self.__get_eval_property(\
"""<?xml version="1.0" encoding="iso-8859-1"?>
<response xmlns="urn:debugger_protocol_v1" xmlns:xdebug="http://xdebug.org/dbgp/xdebug" command="eval" transaction_id="13">
<property
address="140722906708544" type="array"
children="1" numchildren="2" page="0" pagesize="32">
<property
name="0" address="140022315302704"
type="array" children="1" numchildren="1"></property>
<property
name="key" address="140022315307008"
type="array" children="1" numchildren="1"></property>
</property>
</response>
""", '$testarr', 'php')
self.assertEqual(prop.display_name,'$testarr')
self.assertEqual(prop.value,'')
self.assertEqual(prop.type,'array')
self.assertEqual(prop.depth,0)
self.assertTrue(prop.has_children)
self.assertEqual(prop.child_count(),2)
self.assertEqual(prop.children[0].type,'array')
self.assertEqual(prop.children[0].display_name,'$testarr[0]')
self.assertEqual(prop.children[1].type,'array')
self.assertEqual(prop.children[1].display_name,"$testarr['key']")
|
|
8bc64c7e0d461ba51459f927f54822cbacf8404f
|
Surface/rotate.py
|
Surface/rotate.py
|
import sys
import os
import subprocess
import re
status = subprocess.check_output(["xrandr", "-q"])
print (status)
lines = status.split("\n")
print (lines)
stat = ""
for line in lines:
if "eDP-1" in line:
stat = line
orientation = stat.split(" ")[4]
if len(sys.argv) < 2:
exit()
elif (sys.argv[1] == "-l"):
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
else:
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
|
Add screen rotation script for surface
|
Add screen rotation script for surface
|
Python
|
apache-2.0
|
NickTGraham/PythonPack,NickTGraham/PythonPack
|
Add screen rotation script for surface
|
import sys
import os
import subprocess
import re
status = subprocess.check_output(["xrandr", "-q"])
print (status)
lines = status.split("\n")
print (lines)
stat = ""
for line in lines:
if "eDP-1" in line:
stat = line
orientation = stat.split(" ")[4]
if len(sys.argv) < 2:
exit()
elif (sys.argv[1] == "-l"):
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
else:
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
|
<commit_before><commit_msg>Add screen rotation script for surface<commit_after>
|
import sys
import os
import subprocess
import re
status = subprocess.check_output(["xrandr", "-q"])
print (status)
lines = status.split("\n")
print (lines)
stat = ""
for line in lines:
if "eDP-1" in line:
stat = line
orientation = stat.split(" ")[4]
if len(sys.argv) < 2:
exit()
elif (sys.argv[1] == "-l"):
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
else:
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
|
Add screen rotation script for surfaceimport sys
import os
import subprocess
import re
status = subprocess.check_output(["xrandr", "-q"])
print (status)
lines = status.split("\n")
print (lines)
stat = ""
for line in lines:
if "eDP-1" in line:
stat = line
orientation = stat.split(" ")[4]
if len(sys.argv) < 2:
exit()
elif (sys.argv[1] == "-l"):
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
else:
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
|
<commit_before><commit_msg>Add screen rotation script for surface<commit_after>import sys
import os
import subprocess
import re
status = subprocess.check_output(["xrandr", "-q"])
print (status)
lines = status.split("\n")
print (lines)
stat = ""
for line in lines:
if "eDP-1" in line:
stat = line
orientation = stat.split(" ")[4]
if len(sys.argv) < 2:
exit()
elif (sys.argv[1] == "-l"):
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
else:
if (orientation == "left"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "normal"])
elif (orientation == "inverted"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "left"])
elif (orientation == "right"):
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "inverted"])
else:
subprocess.call(["xrandr", "--output", "eDP-1", "--rotate", "right"])
|
|
cfe18ca2fb113379af33f7172c8bb67787c82a1d
|
test_valid_object_file.py
|
test_valid_object_file.py
|
from astropy.table import Table
TABLE_NAME = 'feder_object_list.csv'
def test_table_can_be_read():
objs = Table.read(TABLE_NAME, format='ascii', delimiter=',')
columns = ['object', 'ra', 'dec']
for col in columns:
assert col in objs.colnames
|
Add simple test that table is readable
|
Add simple test that table is readable
|
Python
|
bsd-2-clause
|
mwcraig/feder-object-list
|
Add simple test that table is readable
|
from astropy.table import Table
TABLE_NAME = 'feder_object_list.csv'
def test_table_can_be_read():
objs = Table.read(TABLE_NAME, format='ascii', delimiter=',')
columns = ['object', 'ra', 'dec']
for col in columns:
assert col in objs.colnames
|
<commit_before><commit_msg>Add simple test that table is readable<commit_after>
|
from astropy.table import Table
TABLE_NAME = 'feder_object_list.csv'
def test_table_can_be_read():
objs = Table.read(TABLE_NAME, format='ascii', delimiter=',')
columns = ['object', 'ra', 'dec']
for col in columns:
assert col in objs.colnames
|
Add simple test that table is readablefrom astropy.table import Table
TABLE_NAME = 'feder_object_list.csv'
def test_table_can_be_read():
objs = Table.read(TABLE_NAME, format='ascii', delimiter=',')
columns = ['object', 'ra', 'dec']
for col in columns:
assert col in objs.colnames
|
<commit_before><commit_msg>Add simple test that table is readable<commit_after>from astropy.table import Table
TABLE_NAME = 'feder_object_list.csv'
def test_table_can_be_read():
objs = Table.read(TABLE_NAME, format='ascii', delimiter=',')
columns = ['object', 'ra', 'dec']
for col in columns:
assert col in objs.colnames
|
|
1b42dc4d49ccbef9b2ed4bd31e8bb32b597a3575
|
oscar/agent/scripted/minigame/nicolas_mineralshard.py
|
oscar/agent/scripted/minigame/nicolas_mineralshard.py
|
import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_NEW_SELECTION = [0]
class CollectMineralShards(base_agent.BaseAgent):
"""An agent specifically for solving the CollectMineralShards map."""
def step(self, obs):
super(CollectMineralShards, self).step(obs)
player_relative = obs.observation["screen"][_PLAYER_RELATIVE]
if _MOVE_SCREEN in obs.observation["available_actions"]:
neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
if not neutral_y.any() or not player_y.any():
return actions.FunctionCall(_NO_OP, [])
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = numpy.linalg.norm(numpy.array(player) - numpy.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, closest])
else:
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
return actions.FunctionCall(_SELECT_POINT, [_NEW_SELECTION, [player_x[0], player_y[0]]])
|
Create a new scripted agent: copy from the deepmind one but do not select the two marins, only one
|
Create a new scripted agent: copy from the deepmind one but do not select the two marins, only one
|
Python
|
apache-2.0
|
Xaxetrov/OSCAR,Xaxetrov/OSCAR
|
Create a new scripted agent: copy from the deepmind one but do not select the two marins, only one
|
import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_NEW_SELECTION = [0]
class CollectMineralShards(base_agent.BaseAgent):
"""An agent specifically for solving the CollectMineralShards map."""
def step(self, obs):
super(CollectMineralShards, self).step(obs)
player_relative = obs.observation["screen"][_PLAYER_RELATIVE]
if _MOVE_SCREEN in obs.observation["available_actions"]:
neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
if not neutral_y.any() or not player_y.any():
return actions.FunctionCall(_NO_OP, [])
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = numpy.linalg.norm(numpy.array(player) - numpy.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, closest])
else:
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
return actions.FunctionCall(_SELECT_POINT, [_NEW_SELECTION, [player_x[0], player_y[0]]])
|
<commit_before><commit_msg>Create a new scripted agent: copy from the deepmind one but do not select the two marins, only one<commit_after>
|
import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_NEW_SELECTION = [0]
class CollectMineralShards(base_agent.BaseAgent):
"""An agent specifically for solving the CollectMineralShards map."""
def step(self, obs):
super(CollectMineralShards, self).step(obs)
player_relative = obs.observation["screen"][_PLAYER_RELATIVE]
if _MOVE_SCREEN in obs.observation["available_actions"]:
neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
if not neutral_y.any() or not player_y.any():
return actions.FunctionCall(_NO_OP, [])
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = numpy.linalg.norm(numpy.array(player) - numpy.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, closest])
else:
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
return actions.FunctionCall(_SELECT_POINT, [_NEW_SELECTION, [player_x[0], player_y[0]]])
|
Create a new scripted agent: copy from the deepmind one but do not select the two marins, only oneimport numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_NEW_SELECTION = [0]
class CollectMineralShards(base_agent.BaseAgent):
"""An agent specifically for solving the CollectMineralShards map."""
def step(self, obs):
super(CollectMineralShards, self).step(obs)
player_relative = obs.observation["screen"][_PLAYER_RELATIVE]
if _MOVE_SCREEN in obs.observation["available_actions"]:
neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
if not neutral_y.any() or not player_y.any():
return actions.FunctionCall(_NO_OP, [])
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = numpy.linalg.norm(numpy.array(player) - numpy.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, closest])
else:
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
return actions.FunctionCall(_SELECT_POINT, [_NEW_SELECTION, [player_x[0], player_y[0]]])
|
<commit_before><commit_msg>Create a new scripted agent: copy from the deepmind one but do not select the two marins, only one<commit_after>import numpy
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_NEW_SELECTION = [0]
class CollectMineralShards(base_agent.BaseAgent):
"""An agent specifically for solving the CollectMineralShards map."""
def step(self, obs):
super(CollectMineralShards, self).step(obs)
player_relative = obs.observation["screen"][_PLAYER_RELATIVE]
if _MOVE_SCREEN in obs.observation["available_actions"]:
neutral_y, neutral_x = (player_relative == _PLAYER_NEUTRAL).nonzero()
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
if not neutral_y.any() or not player_y.any():
return actions.FunctionCall(_NO_OP, [])
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = numpy.linalg.norm(numpy.array(player) - numpy.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, closest])
else:
player_y, player_x = (player_relative == _PLAYER_FRIENDLY).nonzero()
return actions.FunctionCall(_SELECT_POINT, [_NEW_SELECTION, [player_x[0], player_y[0]]])
|
|
89d6ce60906bf619bea8cb5c8d0d51c9269fc781
|
tools/stats/box_recall.py
|
tools/stats/box_recall.py
|
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame, boxes_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('box_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
box_proto = proto_load(args.box_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# proposal boxes
proposal_boxes = boxes_at_frame(box_proto, frame_id)
proposal_boxes = [box['bbox'] for box in proposal_boxes]
if len(proposal_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(proposal_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
Add script to calculate box proto recall.
|
Add script to calculate box proto recall.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add script to calculate box proto recall.
|
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame, boxes_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('box_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
box_proto = proto_load(args.box_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# proposal boxes
proposal_boxes = boxes_at_frame(box_proto, frame_id)
proposal_boxes = [box['bbox'] for box in proposal_boxes]
if len(proposal_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(proposal_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
<commit_before><commit_msg>Add script to calculate box proto recall.<commit_after>
|
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame, boxes_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('box_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
box_proto = proto_load(args.box_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# proposal boxes
proposal_boxes = boxes_at_frame(box_proto, frame_id)
proposal_boxes = [box['bbox'] for box in proposal_boxes]
if len(proposal_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(proposal_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
Add script to calculate box proto recall.#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame, boxes_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('box_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
box_proto = proto_load(args.box_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# proposal boxes
proposal_boxes = boxes_at_frame(box_proto, frame_id)
proposal_boxes = [box['bbox'] for box in proposal_boxes]
if len(proposal_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(proposal_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
<commit_before><commit_msg>Add script to calculate box proto recall.<commit_after>#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame, boxes_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('box_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
box_proto = proto_load(args.box_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# proposal boxes
proposal_boxes = boxes_at_frame(box_proto, frame_id)
proposal_boxes = [box['bbox'] for box in proposal_boxes]
if len(proposal_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(proposal_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
|
46a0acede9dbbb1e751f5197a22be545e291d042
|
src/ggrc/migrations/versions/20170219221807_4e7fda17abc7_fix_assessment_contexts.py
|
src/ggrc/migrations/versions/20170219221807_4e7fda17abc7_fix_assessment_contexts.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix Assessment contexts
Create Date: 2017-02-19 22:18:07.518997
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4e7fda17abc7'
down_revision = '2f1cee67a8f3'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
# Fixes assessments without audit context
# SELECT COUNT(*) FROM assessments
# WHERE context_id is NULL;
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.source_id = a.id
AND r.source_type = 'Assessment' AND r.destination_type = 'Audit'
JOIN audits AS au ON r.destination_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.destination_id = a.id
AND r.destination_type = 'Assessment' AND r.source_type = 'Audit'
JOIN audits AS au ON r.source_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
# Fixes object_documents mapped to assessments without audit context
# SELECT COUNT(*) FROM object_documents
# WHERE documentable_type = 'Assessment' AND context_id IS NULL;
sql = """
UPDATE object_documents AS od
JOIN assessments AS a ON od.documentable_id = a.id
SET od.context_id = a.context_id
WHERE documentable_type = 'Assessment' AND od.context_id IS NULL;
"""
op.execute(sql)
# Fixes documents attached to assessments without audit context
# SELECT count(*)
# FROM documents AS d
# JOIN object_documents AS od ON d.id = od.document_id
# WHERE od.documentable_type = 'Assessment' AND d.context_id IS NULL;
sql = """
UPDATE documents AS d
JOIN object_documents AS od ON od.document_id = d.id
AND od.documentable_type = 'Assessment'
SET d.context_id = od.context_id
WHERE d.context_id IS NULL
"""
op.execute(sql)
def downgrade():
"""Nothing to do here."""
pass
|
Fix missing contexts for assessment evidence
|
Fix missing contexts for assessment evidence
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core
|
Fix missing contexts for assessment evidence
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix Assessment contexts
Create Date: 2017-02-19 22:18:07.518997
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4e7fda17abc7'
down_revision = '2f1cee67a8f3'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
# Fixes assessments without audit context
# SELECT COUNT(*) FROM assessments
# WHERE context_id is NULL;
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.source_id = a.id
AND r.source_type = 'Assessment' AND r.destination_type = 'Audit'
JOIN audits AS au ON r.destination_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.destination_id = a.id
AND r.destination_type = 'Assessment' AND r.source_type = 'Audit'
JOIN audits AS au ON r.source_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
# Fixes object_documents mapped to assessments without audit context
# SELECT COUNT(*) FROM object_documents
# WHERE documentable_type = 'Assessment' AND context_id IS NULL;
sql = """
UPDATE object_documents AS od
JOIN assessments AS a ON od.documentable_id = a.id
SET od.context_id = a.context_id
WHERE documentable_type = 'Assessment' AND od.context_id IS NULL;
"""
op.execute(sql)
# Fixes documents attached to assessments without audit context
# SELECT count(*)
# FROM documents AS d
# JOIN object_documents AS od ON d.id = od.document_id
# WHERE od.documentable_type = 'Assessment' AND d.context_id IS NULL;
sql = """
UPDATE documents AS d
JOIN object_documents AS od ON od.document_id = d.id
AND od.documentable_type = 'Assessment'
SET d.context_id = od.context_id
WHERE d.context_id IS NULL
"""
op.execute(sql)
def downgrade():
"""Nothing to do here."""
pass
|
<commit_before><commit_msg>Fix missing contexts for assessment evidence<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix Assessment contexts
Create Date: 2017-02-19 22:18:07.518997
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4e7fda17abc7'
down_revision = '2f1cee67a8f3'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
# Fixes assessments without audit context
# SELECT COUNT(*) FROM assessments
# WHERE context_id is NULL;
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.source_id = a.id
AND r.source_type = 'Assessment' AND r.destination_type = 'Audit'
JOIN audits AS au ON r.destination_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.destination_id = a.id
AND r.destination_type = 'Assessment' AND r.source_type = 'Audit'
JOIN audits AS au ON r.source_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
# Fixes object_documents mapped to assessments without audit context
# SELECT COUNT(*) FROM object_documents
# WHERE documentable_type = 'Assessment' AND context_id IS NULL;
sql = """
UPDATE object_documents AS od
JOIN assessments AS a ON od.documentable_id = a.id
SET od.context_id = a.context_id
WHERE documentable_type = 'Assessment' AND od.context_id IS NULL;
"""
op.execute(sql)
# Fixes documents attached to assessments without audit context
# SELECT count(*)
# FROM documents AS d
# JOIN object_documents AS od ON d.id = od.document_id
# WHERE od.documentable_type = 'Assessment' AND d.context_id IS NULL;
sql = """
UPDATE documents AS d
JOIN object_documents AS od ON od.document_id = d.id
AND od.documentable_type = 'Assessment'
SET d.context_id = od.context_id
WHERE d.context_id IS NULL
"""
op.execute(sql)
def downgrade():
"""Nothing to do here."""
pass
|
Fix missing contexts for assessment evidence# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix Assessment contexts
Create Date: 2017-02-19 22:18:07.518997
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4e7fda17abc7'
down_revision = '2f1cee67a8f3'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
# Fixes assessments without audit context
# SELECT COUNT(*) FROM assessments
# WHERE context_id is NULL;
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.source_id = a.id
AND r.source_type = 'Assessment' AND r.destination_type = 'Audit'
JOIN audits AS au ON r.destination_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.destination_id = a.id
AND r.destination_type = 'Assessment' AND r.source_type = 'Audit'
JOIN audits AS au ON r.source_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
# Fixes object_documents mapped to assessments without audit context
# SELECT COUNT(*) FROM object_documents
# WHERE documentable_type = 'Assessment' AND context_id IS NULL;
sql = """
UPDATE object_documents AS od
JOIN assessments AS a ON od.documentable_id = a.id
SET od.context_id = a.context_id
WHERE documentable_type = 'Assessment' AND od.context_id IS NULL;
"""
op.execute(sql)
# Fixes documents attached to assessments without audit context
# SELECT count(*)
# FROM documents AS d
# JOIN object_documents AS od ON d.id = od.document_id
# WHERE od.documentable_type = 'Assessment' AND d.context_id IS NULL;
sql = """
UPDATE documents AS d
JOIN object_documents AS od ON od.document_id = d.id
AND od.documentable_type = 'Assessment'
SET d.context_id = od.context_id
WHERE d.context_id IS NULL
"""
op.execute(sql)
def downgrade():
"""Nothing to do here."""
pass
|
<commit_before><commit_msg>Fix missing contexts for assessment evidence<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix Assessment contexts
Create Date: 2017-02-19 22:18:07.518997
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '4e7fda17abc7'
down_revision = '2f1cee67a8f3'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
# Fixes assessments without audit context
# SELECT COUNT(*) FROM assessments
# WHERE context_id is NULL;
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.source_id = a.id
AND r.source_type = 'Assessment' AND r.destination_type = 'Audit'
JOIN audits AS au ON r.destination_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
sql = """
UPDATE assessments as a
JOIN relationships AS r ON r.destination_id = a.id
AND r.destination_type = 'Assessment' AND r.source_type = 'Audit'
JOIN audits AS au ON r.source_id = au.id
SET a.context_id = au.context_id
WHERE a.context_id is NULL;
"""
op.execute(sql)
# Fixes object_documents mapped to assessments without audit context
# SELECT COUNT(*) FROM object_documents
# WHERE documentable_type = 'Assessment' AND context_id IS NULL;
sql = """
UPDATE object_documents AS od
JOIN assessments AS a ON od.documentable_id = a.id
SET od.context_id = a.context_id
WHERE documentable_type = 'Assessment' AND od.context_id IS NULL;
"""
op.execute(sql)
# Fixes documents attached to assessments without audit context
# SELECT count(*)
# FROM documents AS d
# JOIN object_documents AS od ON d.id = od.document_id
# WHERE od.documentable_type = 'Assessment' AND d.context_id IS NULL;
sql = """
UPDATE documents AS d
JOIN object_documents AS od ON od.document_id = d.id
AND od.documentable_type = 'Assessment'
SET d.context_id = od.context_id
WHERE d.context_id IS NULL
"""
op.execute(sql)
def downgrade():
"""Nothing to do here."""
pass
|
|
fcddd7145bf7ab33cf90f8d4d39cfc9bb927e26d
|
ws-tests/test_invalid_merge.py
|
ws-tests/test_invalid_merge.py
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
Add a test for an invalid merge
|
Add a test for an invalid merge
|
Python
|
bsd-2-clause
|
leto/new_opentree_api,leto/new_opentree_api
|
Add a test for an invalid merge
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for an invalid merge<commit_after>
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
Add a test for an invalid merge#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for an invalid merge<commit_after>#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import sys
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/merge/v1/master/master'
data = {
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
|
e17e436b7671b3c6834d286c91f541ee768fadac
|
script/gen-iana-rclasses.py
|
script/gen-iana-rclasses.py
|
import enumgen
from pprint import pprint
import os
import os.path
data = enumgen.fetch_csv(
"http://www.iana.org/assignments/dns-parameters/dns-parameters-2.csv")
data_dict = []
for row in data:
if '-' in row[0]: continue
if ' ' in row[2]:
row[2] = row[2].split(' ')[-1].strip('()')
if row[2] in [i['name'] for i in data_dict]: continue
data_dict.append({
"name": row[2],
"value": int(row[0]),
"desc": row[3],
})
print(enumgen.render_template(
os.path.join(os.path.dirname(__file__), 'templates'),
'enum_with_tests.rs.jinja2',
ename='RRClass',
rep='u16',
default='Reserved',
default_val=0,
variants=data_dict,
))
|
Add RRClass generation script to git
|
Add RRClass generation script to git
|
Python
|
bsd-2-clause
|
oko/rust-dns
|
Add RRClass generation script to git
|
import enumgen
from pprint import pprint
import os
import os.path
data = enumgen.fetch_csv(
"http://www.iana.org/assignments/dns-parameters/dns-parameters-2.csv")
data_dict = []
for row in data:
if '-' in row[0]: continue
if ' ' in row[2]:
row[2] = row[2].split(' ')[-1].strip('()')
if row[2] in [i['name'] for i in data_dict]: continue
data_dict.append({
"name": row[2],
"value": int(row[0]),
"desc": row[3],
})
print(enumgen.render_template(
os.path.join(os.path.dirname(__file__), 'templates'),
'enum_with_tests.rs.jinja2',
ename='RRClass',
rep='u16',
default='Reserved',
default_val=0,
variants=data_dict,
))
|
<commit_before><commit_msg>Add RRClass generation script to git<commit_after>
|
import enumgen
from pprint import pprint
import os
import os.path
data = enumgen.fetch_csv(
"http://www.iana.org/assignments/dns-parameters/dns-parameters-2.csv")
data_dict = []
for row in data:
if '-' in row[0]: continue
if ' ' in row[2]:
row[2] = row[2].split(' ')[-1].strip('()')
if row[2] in [i['name'] for i in data_dict]: continue
data_dict.append({
"name": row[2],
"value": int(row[0]),
"desc": row[3],
})
print(enumgen.render_template(
os.path.join(os.path.dirname(__file__), 'templates'),
'enum_with_tests.rs.jinja2',
ename='RRClass',
rep='u16',
default='Reserved',
default_val=0,
variants=data_dict,
))
|
Add RRClass generation script to gitimport enumgen
from pprint import pprint
import os
import os.path
data = enumgen.fetch_csv(
"http://www.iana.org/assignments/dns-parameters/dns-parameters-2.csv")
data_dict = []
for row in data:
if '-' in row[0]: continue
if ' ' in row[2]:
row[2] = row[2].split(' ')[-1].strip('()')
if row[2] in [i['name'] for i in data_dict]: continue
data_dict.append({
"name": row[2],
"value": int(row[0]),
"desc": row[3],
})
print(enumgen.render_template(
os.path.join(os.path.dirname(__file__), 'templates'),
'enum_with_tests.rs.jinja2',
ename='RRClass',
rep='u16',
default='Reserved',
default_val=0,
variants=data_dict,
))
|
<commit_before><commit_msg>Add RRClass generation script to git<commit_after>import enumgen
from pprint import pprint
import os
import os.path
data = enumgen.fetch_csv(
"http://www.iana.org/assignments/dns-parameters/dns-parameters-2.csv")
data_dict = []
for row in data:
if '-' in row[0]: continue
if ' ' in row[2]:
row[2] = row[2].split(' ')[-1].strip('()')
if row[2] in [i['name'] for i in data_dict]: continue
data_dict.append({
"name": row[2],
"value": int(row[0]),
"desc": row[3],
})
print(enumgen.render_template(
os.path.join(os.path.dirname(__file__), 'templates'),
'enum_with_tests.rs.jinja2',
ename='RRClass',
rep='u16',
default='Reserved',
default_val=0,
variants=data_dict,
))
|
|
919f8bb14ae91e37e42ff08192df24b60135596f
|
python_programming/what_is_my_name.py
|
python_programming/what_is_my_name.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Author: Quincey Sun
# Mail: zeroonegit@gmail.com
# Created Time: 2016-06-21 23:25:24
###############################################################################
name = 'roger'
x = 0
while x < 3:
guess = input("What's my name? ")
if (guess != name):
print ("Wrong")
x += 1
if (x == 3):
print ("You've reached the max attempt!")
else:
print ("Correct")
break
|
Write a program that has a user guess your name, but they only get 3 chances to do so until the program quits.
|
Write a program that has a user guess your name, but they only get 3 chances to do so until the program quits.
|
Python
|
mit
|
zeroonegit/python,QuinceySun/Python,QuinceySun/Python,zeroonegit/python
|
Write a program that has a user guess your name, but they only get 3 chances to do so until the program quits.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Author: Quincey Sun
# Mail: zeroonegit@gmail.com
# Created Time: 2016-06-21 23:25:24
###############################################################################
name = 'roger'
x = 0
while x < 3:
guess = input("What's my name? ")
if (guess != name):
print ("Wrong")
x += 1
if (x == 3):
print ("You've reached the max attempt!")
else:
print ("Correct")
break
|
<commit_before><commit_msg>Write a program that has a user guess your name, but they only get 3 chances to do so until the program quits.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Author: Quincey Sun
# Mail: zeroonegit@gmail.com
# Created Time: 2016-06-21 23:25:24
###############################################################################
name = 'roger'
x = 0
while x < 3:
guess = input("What's my name? ")
if (guess != name):
print ("Wrong")
x += 1
if (x == 3):
print ("You've reached the max attempt!")
else:
print ("Correct")
break
|
Write a program that has a user guess your name, but they only get 3 chances to do so until the program quits.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Author: Quincey Sun
# Mail: zeroonegit@gmail.com
# Created Time: 2016-06-21 23:25:24
###############################################################################
name = 'roger'
x = 0
while x < 3:
guess = input("What's my name? ")
if (guess != name):
print ("Wrong")
x += 1
if (x == 3):
print ("You've reached the max attempt!")
else:
print ("Correct")
break
|
<commit_before><commit_msg>Write a program that has a user guess your name, but they only get 3 chances to do so until the program quits.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Author: Quincey Sun
# Mail: zeroonegit@gmail.com
# Created Time: 2016-06-21 23:25:24
###############################################################################
name = 'roger'
x = 0
while x < 3:
guess = input("What's my name? ")
if (guess != name):
print ("Wrong")
x += 1
if (x == 3):
print ("You've reached the max attempt!")
else:
print ("Correct")
break
|
|
5ef92912ca5d4a3bd3469aae7cfd5f4eca1055f7
|
cibopath/templates.py
|
cibopath/templates.py
|
# -*- coding: utf-8 -*-
import json
import logging
import pathlib
GITHUB_URL = 'https://github.com/{user}/{repo}'
TEMPLATE_DIR = pathlib.Path.home() / '.cibopath'
TEMPLATE_DIR.mkdir(exist_ok=True)
JSON_STORE = TEMPLATE_DIR / 'templates.json'
logger = logging.getLogger('cibopath')
class Template:
def __init__(self, name, author, repo, context, tags):
self.name = name
self.author = author
self.repo = repo
self.context = context
self.tags = sorted(tags)
def __repr__(self):
return '<Template {name}>'.format(name=self.name)
def __contains__(self, tag):
return tag in self.tags
@property
def url(self):
return GITHUB_URL.format(user=self.author, repo=self.repo)
def template_to_json(python_object):
if isinstance(python_object, Template):
return {
'__class__': 'Template',
'name': python_object.name,
'author': python_object.author,
'repo': python_object.repo,
'context': python_object.context,
'tags': python_object.tags,
}
raise TypeError
def template_from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'Template':
author = json_object['author']
name = json_object['name']
repo = json_object['repo']
context = json_object['context']
tags = json_object['tags']
return Template(name, author, repo, context, tags)
return json_object
def dump(templates, file_path=JSON_STORE):
logger.debug('Dumping templates to {}'.format(file_path))
with file_path.open('w', encoding='utf8') as f:
json.dump(templates, f, default=template_to_json)
def load(file_path=JSON_STORE):
logger.debug('Loading templates from {}'.format(file_path))
with file_path.open('r', encoding='utf8') as f:
return json.load(f, object_hook=template_from_json)
|
Create Template class and implement json handler
|
Create Template class and implement json handler
|
Python
|
bsd-3-clause
|
hackebrot/cibopath
|
Create Template class and implement json handler
|
# -*- coding: utf-8 -*-
import json
import logging
import pathlib
GITHUB_URL = 'https://github.com/{user}/{repo}'
TEMPLATE_DIR = pathlib.Path.home() / '.cibopath'
TEMPLATE_DIR.mkdir(exist_ok=True)
JSON_STORE = TEMPLATE_DIR / 'templates.json'
logger = logging.getLogger('cibopath')
class Template:
def __init__(self, name, author, repo, context, tags):
self.name = name
self.author = author
self.repo = repo
self.context = context
self.tags = sorted(tags)
def __repr__(self):
return '<Template {name}>'.format(name=self.name)
def __contains__(self, tag):
return tag in self.tags
@property
def url(self):
return GITHUB_URL.format(user=self.author, repo=self.repo)
def template_to_json(python_object):
if isinstance(python_object, Template):
return {
'__class__': 'Template',
'name': python_object.name,
'author': python_object.author,
'repo': python_object.repo,
'context': python_object.context,
'tags': python_object.tags,
}
raise TypeError
def template_from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'Template':
author = json_object['author']
name = json_object['name']
repo = json_object['repo']
context = json_object['context']
tags = json_object['tags']
return Template(name, author, repo, context, tags)
return json_object
def dump(templates, file_path=JSON_STORE):
logger.debug('Dumping templates to {}'.format(file_path))
with file_path.open('w', encoding='utf8') as f:
json.dump(templates, f, default=template_to_json)
def load(file_path=JSON_STORE):
logger.debug('Loading templates from {}'.format(file_path))
with file_path.open('r', encoding='utf8') as f:
return json.load(f, object_hook=template_from_json)
|
<commit_before><commit_msg>Create Template class and implement json handler<commit_after>
|
# -*- coding: utf-8 -*-
import json
import logging
import pathlib
GITHUB_URL = 'https://github.com/{user}/{repo}'
TEMPLATE_DIR = pathlib.Path.home() / '.cibopath'
TEMPLATE_DIR.mkdir(exist_ok=True)
JSON_STORE = TEMPLATE_DIR / 'templates.json'
logger = logging.getLogger('cibopath')
class Template:
def __init__(self, name, author, repo, context, tags):
self.name = name
self.author = author
self.repo = repo
self.context = context
self.tags = sorted(tags)
def __repr__(self):
return '<Template {name}>'.format(name=self.name)
def __contains__(self, tag):
return tag in self.tags
@property
def url(self):
return GITHUB_URL.format(user=self.author, repo=self.repo)
def template_to_json(python_object):
if isinstance(python_object, Template):
return {
'__class__': 'Template',
'name': python_object.name,
'author': python_object.author,
'repo': python_object.repo,
'context': python_object.context,
'tags': python_object.tags,
}
raise TypeError
def template_from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'Template':
author = json_object['author']
name = json_object['name']
repo = json_object['repo']
context = json_object['context']
tags = json_object['tags']
return Template(name, author, repo, context, tags)
return json_object
def dump(templates, file_path=JSON_STORE):
logger.debug('Dumping templates to {}'.format(file_path))
with file_path.open('w', encoding='utf8') as f:
json.dump(templates, f, default=template_to_json)
def load(file_path=JSON_STORE):
logger.debug('Loading templates from {}'.format(file_path))
with file_path.open('r', encoding='utf8') as f:
return json.load(f, object_hook=template_from_json)
|
Create Template class and implement json handler# -*- coding: utf-8 -*-
import json
import logging
import pathlib
GITHUB_URL = 'https://github.com/{user}/{repo}'
TEMPLATE_DIR = pathlib.Path.home() / '.cibopath'
TEMPLATE_DIR.mkdir(exist_ok=True)
JSON_STORE = TEMPLATE_DIR / 'templates.json'
logger = logging.getLogger('cibopath')
class Template:
def __init__(self, name, author, repo, context, tags):
self.name = name
self.author = author
self.repo = repo
self.context = context
self.tags = sorted(tags)
def __repr__(self):
return '<Template {name}>'.format(name=self.name)
def __contains__(self, tag):
return tag in self.tags
@property
def url(self):
return GITHUB_URL.format(user=self.author, repo=self.repo)
def template_to_json(python_object):
if isinstance(python_object, Template):
return {
'__class__': 'Template',
'name': python_object.name,
'author': python_object.author,
'repo': python_object.repo,
'context': python_object.context,
'tags': python_object.tags,
}
raise TypeError
def template_from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'Template':
author = json_object['author']
name = json_object['name']
repo = json_object['repo']
context = json_object['context']
tags = json_object['tags']
return Template(name, author, repo, context, tags)
return json_object
def dump(templates, file_path=JSON_STORE):
logger.debug('Dumping templates to {}'.format(file_path))
with file_path.open('w', encoding='utf8') as f:
json.dump(templates, f, default=template_to_json)
def load(file_path=JSON_STORE):
logger.debug('Loading templates from {}'.format(file_path))
with file_path.open('r', encoding='utf8') as f:
return json.load(f, object_hook=template_from_json)
|
<commit_before><commit_msg>Create Template class and implement json handler<commit_after># -*- coding: utf-8 -*-
import json
import logging
import pathlib
GITHUB_URL = 'https://github.com/{user}/{repo}'
TEMPLATE_DIR = pathlib.Path.home() / '.cibopath'
TEMPLATE_DIR.mkdir(exist_ok=True)
JSON_STORE = TEMPLATE_DIR / 'templates.json'
logger = logging.getLogger('cibopath')
class Template:
def __init__(self, name, author, repo, context, tags):
self.name = name
self.author = author
self.repo = repo
self.context = context
self.tags = sorted(tags)
def __repr__(self):
return '<Template {name}>'.format(name=self.name)
def __contains__(self, tag):
return tag in self.tags
@property
def url(self):
return GITHUB_URL.format(user=self.author, repo=self.repo)
def template_to_json(python_object):
if isinstance(python_object, Template):
return {
'__class__': 'Template',
'name': python_object.name,
'author': python_object.author,
'repo': python_object.repo,
'context': python_object.context,
'tags': python_object.tags,
}
raise TypeError
def template_from_json(json_object):
if '__class__' in json_object:
if json_object['__class__'] == 'Template':
author = json_object['author']
name = json_object['name']
repo = json_object['repo']
context = json_object['context']
tags = json_object['tags']
return Template(name, author, repo, context, tags)
return json_object
def dump(templates, file_path=JSON_STORE):
logger.debug('Dumping templates to {}'.format(file_path))
with file_path.open('w', encoding='utf8') as f:
json.dump(templates, f, default=template_to_json)
def load(file_path=JSON_STORE):
logger.debug('Loading templates from {}'.format(file_path))
with file_path.open('r', encoding='utf8') as f:
return json.load(f, object_hook=template_from_json)
|
|
d9302fab69d16f556adf27248c0fa7f7d12dc2ec
|
python/opencv/opencv_2/videos/play_video_from_file.py
|
python/opencv/opencv_2/videos/play_video_from_file.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Play video: play videos from files
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#playing-video-from-file
"""
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the programm options (get the path of the image file to read)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The video file to play", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
video_capture = cv.VideoCapture(infile_str)
#framerate = 25
framerate = video_capture.get(cv.cv.CV_CAP_PROP_FPS)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is an numpy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(int(1000./framerate)) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).
|
Add a snippet (Python OpenCV).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python OpenCV).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Play video: play videos from files
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#playing-video-from-file
"""
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the programm options (get the path of the image file to read)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The video file to play", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
video_capture = cv.VideoCapture(infile_str)
#framerate = 25
framerate = video_capture.get(cv.cv.CV_CAP_PROP_FPS)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is an numpy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(int(1000./framerate)) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Play video: play videos from files
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#playing-video-from-file
"""
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the programm options (get the path of the image file to read)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The video file to play", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
video_capture = cv.VideoCapture(infile_str)
#framerate = 25
framerate = video_capture.get(cv.cv.CV_CAP_PROP_FPS)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is an numpy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(int(1000./framerate)) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Play video: play videos from files
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#playing-video-from-file
"""
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the program options (get the path of the video file to read)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The video file to play", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
video_capture = cv.VideoCapture(infile_str)
#framerate = 25
framerate = video_capture.get(cv.cv.CV_CAP_PROP_FPS)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a numpy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(int(1000./framerate)) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Play video: play videos from files
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#playing-video-from-file
"""
from __future__ import print_function
import cv2 as cv
import argparse
def main():
# Parse the program options (get the path of the video file to read)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The video file to play", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
video_capture = cv.VideoCapture(infile_str)
#framerate = 25
framerate = video_capture.get(cv.cv.CV_CAP_PROP_FPS)
print("Press Q to quit.")
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is a numpy array.
ret, img_np = video_capture.read()
# Display the resulting frame
cv.imshow('video capture snippet', img_np)
if cv.waitKey(int(1000./framerate)) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
|
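Note: the play-video snippet in this record uses the OpenCV 2.x constant path (cv.cv.CV_CAP_PROP_FPS) and never checks ret, so it raises once the file runs out of frames. A minimal sketch of the same loop against the OpenCV 3/4 Python bindings, not part of the original commit and using a placeholder file path, could look like this:

import cv2

def play(path, fallback_fps=25.0):
    cap = cv2.VideoCapture(path)
    fps = cap.get(cv2.CAP_PROP_FPS) or fallback_fps  # some containers report 0 fps
    delay_ms = max(1, int(1000.0 / fps))
    while True:
        ret, frame = cap.read()
        if not ret:  # end of stream or read error
            break
        cv2.imshow('video capture snippet', frame)
        if cv2.waitKey(delay_ms) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    play('example.avi')  # placeholder path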
|
a5c723b589699fdf80c42a4186c2fdc0c8d84bb4
|
tests/sentry/app/tests.py
|
tests/sentry/app/tests.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
class GetBufferTest(TestCase):
@mock.patch('sentry.app.import_string')
def test_instantiates_class_with_options(self, import_string):
options = {'hello': 'world'}
path = 'lol.FooBar'
result = app.get_instance(path, options)
import_string.assert_called_once_with(path)
import_string.return_value.assert_called_once_with(**options)
assert result == import_string.return_value.return_value
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
|
Remove test that is probably more trouble than it's worth.
|
Remove test that is probably more trouble than it's worth.
|
Python
|
bsd-3-clause
|
JackDanger/sentry,mvaled/sentry,BuildingLink/sentry,alexm92/sentry,alexm92/sentry,mvaled/sentry,gencer/sentry,JamesMura/sentry,ifduyue/sentry,zenefits/sentry,jean/sentry,fotinakis/sentry,gencer/sentry,zenefits/sentry,gencer/sentry,JamesMura/sentry,zenefits/sentry,mvaled/sentry,mvaled/sentry,BuildingLink/sentry,gencer/sentry,beeftornado/sentry,fotinakis/sentry,beeftornado/sentry,mvaled/sentry,fotinakis/sentry,ifduyue/sentry,ifduyue/sentry,alexm92/sentry,looker/sentry,gencer/sentry,looker/sentry,looker/sentry,mvaled/sentry,ifduyue/sentry,BuildingLink/sentry,zenefits/sentry,BuildingLink/sentry,JackDanger/sentry,JamesMura/sentry,jean/sentry,looker/sentry,fotinakis/sentry,BuildingLink/sentry,JackDanger/sentry,JamesMura/sentry,ifduyue/sentry,jean/sentry,JamesMura/sentry,jean/sentry,jean/sentry,looker/sentry,zenefits/sentry,beeftornado/sentry
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
class GetBufferTest(TestCase):
@mock.patch('sentry.app.import_string')
def test_instantiates_class_with_options(self, import_string):
options = {'hello': 'world'}
path = 'lol.FooBar'
result = app.get_instance(path, options)
import_string.assert_called_once_with(path)
import_string.return_value.assert_called_once_with(**options)
assert result == import_string.return_value.return_value
Remove test that is probably more trouble than it's worth.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
class GetBufferTest(TestCase):
@mock.patch('sentry.app.import_string')
def test_instantiates_class_with_options(self, import_string):
options = {'hello': 'world'}
path = 'lol.FooBar'
result = app.get_instance(path, options)
import_string.assert_called_once_with(path)
import_string.return_value.assert_called_once_with(**options)
assert result == import_string.return_value.return_value
<commit_msg>Remove test that is probably more trouble than it's worth.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
class GetBufferTest(TestCase):
@mock.patch('sentry.app.import_string')
def test_instantiates_class_with_options(self, import_string):
options = {'hello': 'world'}
path = 'lol.FooBar'
result = app.get_instance(path, options)
import_string.assert_called_once_with(path)
import_string.return_value.assert_called_once_with(**options)
assert result == import_string.return_value.return_value
Remove test that is probably more trouble than it's worth.# -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
class GetBufferTest(TestCase):
@mock.patch('sentry.app.import_string')
def test_instantiates_class_with_options(self, import_string):
options = {'hello': 'world'}
path = 'lol.FooBar'
result = app.get_instance(path, options)
import_string.assert_called_once_with(path)
import_string.return_value.assert_called_once_with(**options)
assert result == import_string.return_value.return_value
<commit_msg>Remove test that is probably more trouble than it's worth.<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry import app
from sentry.testutils import TestCase
class AppTest(TestCase):
def test_buffer_is_a_buffer(self):
from sentry.buffer.base import Buffer
self.assertEquals(type(app.buffer), Buffer)
|
eef6bac6cf333b1df874381b6140c5737d5f8c57
|
pycon/dev-settings.py
|
pycon/dev-settings.py
|
from pycon.settings import *
DEFAULT_URL_PREFIX='http://localhost:8000'
DEBUG=True
PAYPAL_TEST = True
TEMPLATES[0]['OPTIONS']['debug'] = True
|
Add a basic dev config
|
Add a basic dev config
|
Python
|
bsd-2-clause
|
artcz/epcon,EuroPython/epcon,artcz/epcon,artcz/epcon,artcz/epcon,artcz/epcon,artcz/epcon,EuroPython/epcon,EuroPython/epcon,EuroPython/epcon
|
Add a basic dev config
|
from pycon.settings import *
DEFAULT_URL_PREFIX='http://localhost:8000'
DEBUG=True
PAYPAL_TEST = True
TEMPLATES[0]['OPTIONS']['debug'] = True
|
<commit_before><commit_msg>Add a basic dev config<commit_after>
|
from pycon.settings import *
DEFAULT_URL_PREFIX='http://localhost:8000'
DEBUG=True
PAYPAL_TEST = True
TEMPLATES[0]['OPTIONS']['debug'] = True
|
Add a basic dev configfrom pycon.settings import *
DEFAULT_URL_PREFIX='http://localhost:8000'
DEBUG=True
PAYPAL_TEST = True
TEMPLATES[0]['OPTIONS']['debug'] = True
|
<commit_before><commit_msg>Add a basic dev config<commit_after>from pycon.settings import *
DEFAULT_URL_PREFIX='http://localhost:8000'
DEBUG=True
PAYPAL_TEST = True
TEMPLATES[0]['OPTIONS']['debug'] = True
|
|
6fc5e78136285370d7606f31816b0fdf3a58606e
|
print_bot_id.py
|
print_bot_id.py
|
import os
from slackclient import SlackClient
# Update with your bot's name
BOT_NAME = 'watsonbot'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
else:
print("could not find bot user with the name " + BOT_NAME)
|
Add script to print bot id
|
Add script to print bot id
|
Python
|
apache-2.0
|
kostickm/watsonbot
|
Add script to print bot id
|
import os
from slackclient import SlackClient
# Update with your bot's name
BOT_NAME = 'watsonbot'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
else:
print("could not find bot user with the name " + BOT_NAME)
|
<commit_before><commit_msg>Add script to print bot id<commit_after>
|
import os
from slackclient import SlackClient
# Update with your bot's name
BOT_NAME = 'watsonbot'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
else:
print("could not find bot user with the name " + BOT_NAME)
|
Add script to print bot idimport os
from slackclient import SlackClient
# Update with your bot's name
BOT_NAME = 'watsonbot'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
else:
print("could not find bot user with the name " + BOT_NAME)
|
<commit_before><commit_msg>Add script to print bot id<commit_after>import os
from slackclient import SlackClient
# Update with your bot's name
BOT_NAME = 'watsonbot'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
else:
print("could not find bot user with the name " + BOT_NAME)
|
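Note: the bot-id script in this record targets the legacy slackclient 1.x API. A rough equivalent against the newer slack_sdk package, offered only as a sketch (the SLACK_BOT_TOKEN variable and bot name are carried over from the snippet, and pagination of users.list is ignored), might read:

import os
from slack_sdk import WebClient

BOT_NAME = 'watsonbot'  # assumed, same as in the snippet above

def print_bot_id():
    client = WebClient(token=os.environ.get('SLACK_BOT_TOKEN'))
    response = client.users_list()  # large workspaces may need pagination
    for user in response.get('members', []):
        if user.get('name') == BOT_NAME:
            print("Bot ID for '%s' is %s" % (user['name'], user['id']))
            return
    print("could not find bot user with the name " + BOT_NAME)

if __name__ == '__main__':
    print_bot_id()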
|
44de127d69cd00dfc636e2f8f2cdf42f0e85b7d4
|
remoting/PRESUBMIT.py
|
remoting/PRESUBMIT.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for remoting.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
BROWSER_TEST_INSTRUCTIONS_LINK = (
"https://wiki.corp.google.com/twiki/bin/view/Main/ChromotingWaterfall#Running_on_a_Swarming_bot.")
def CheckChangeOnUpload(input_api, output_api):
print "*******IMPORTANT NOTE*******"
print "Before committing, please run Remoting browser_tests."
print "Instructions: %s" % BROWSER_TEST_INSTRUCTIONS_LINK
print "Make sure all tests pass."
return []
def CheckChangeOnCommit(input_api, output_api):
"""TODO(anandc): Run browser-tests on the Chromoting waterfall as part of
committing a CL. See http://crbug/498026"""
return []
|
Print out a note, during change upload, on running Remoting browser-tests before committing changes under $src/remoting.
|
Print out a note, during change upload, on running Remoting browser-tests before committing changes under $src/remoting.
BUG=
Review URL: https://codereview.chromium.org/1166123004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333390}
|
Python
|
bsd-3-clause
|
axinging/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,Just-D/chromium-1,axinging/chromium-crosswalk,chuan9/chromium-crosswalk
|
Print out a note, during change upload, on running Remoting browser-tests before committing changes under $src/remoting.
BUG=
Review URL: https://codereview.chromium.org/1166123004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333390}
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for remoting.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
BROWSER_TEST_INSTRUCTIONS_LINK = (
"https://wiki.corp.google.com/twiki/bin/view/Main/ChromotingWaterfall#Running_on_a_Swarming_bot.")
def CheckChangeOnUpload(input_api, output_api):
print "*******IMPORTANT NOTE*******"
print "Before committing, please run Remoting browser_tests."
print "Instructions: %s" % BROWSER_TEST_INSTRUCTIONS_LINK
print "Make sure all tests pass."
return []
def CheckChangeOnCommit(input_api, output_api):
"""TODO(anandc): Run browser-tests on the Chromoting waterfall as part of
committing a CL. See http://crbug/498026"""
return []
|
<commit_before><commit_msg>Print out a note, during change upload, on running Remoting browser-tests before committing changes under $src/remoting.
BUG=
Review URL: https://codereview.chromium.org/1166123004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333390}<commit_after>
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for remoting.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
BROWSER_TEST_INSTRUCTIONS_LINK = (
"https://wiki.corp.google.com/twiki/bin/view/Main/ChromotingWaterfall#Running_on_a_Swarming_bot.")
def CheckChangeOnUpload(input_api, output_api):
print "*******IMPORTANT NOTE*******"
print "Before committing, please run Remoting browser_tests."
print "Instructions: %s" % BROWSER_TEST_INSTRUCTIONS_LINK
print "Make sure all tests pass."
return []
def CheckChangeOnCommit(input_api, output_api):
"""TODO(anandc): Run browser-tests on the Chromoting waterfall as part of
committing a CL. See http://crbug/498026"""
return []
|
Print out a note, during change upload, on running Remoting browser-tests before committing changes under $src/remoting.
BUG=
Review URL: https://codereview.chromium.org/1166123004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333390}# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for remoting.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
BROWSER_TEST_INSTRUCTIONS_LINK = (
"https://wiki.corp.google.com/twiki/bin/view/Main/ChromotingWaterfall#Running_on_a_Swarming_bot.")
def CheckChangeOnUpload(input_api, output_api):
print "*******IMPORTANT NOTE*******"
print "Before committing, please run Remoting browser_tests."
print "Instructions: %s" % BROWSER_TEST_INSTRUCTIONS_LINK
print "Make sure all tests pass."
return []
def CheckChangeOnCommit(input_api, output_api):
"""TODO(anandc): Run browser-tests on the Chromoting waterfall as part of
committing a CL. See http://crbug/498026"""
return []
|
<commit_before><commit_msg>Print out a note, during change upload, on running Remoting browser-tests before committing changes under $src/remoting.
BUG=
Review URL: https://codereview.chromium.org/1166123004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333390}<commit_after># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for remoting.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
BROWSER_TEST_INSTRUCTIONS_LINK = (
"https://wiki.corp.google.com/twiki/bin/view/Main/ChromotingWaterfall#Running_on_a_Swarming_bot.")
def CheckChangeOnUpload(input_api, output_api):
print "*******IMPORTANT NOTE*******"
print "Before committing, please run Remoting browser_tests."
print "Instructions: %s" % BROWSER_TEST_INSTRUCTIONS_LINK
print "Make sure all tests pass."
return []
def CheckChangeOnCommit(input_api, output_api):
"""TODO(anandc): Run browser-tests on the Chromoting waterfall as part of
committing a CL. See http://crbug/498026"""
return []
|
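Note: the presubmit hook in this record writes its reminder to stdout with Python 2 print statements and always returns an empty list. As an illustration only, not the committed code, the same reminder could be returned through the depot_tools result objects (BROWSER_TEST_INSTRUCTIONS_LINK is the constant defined in the snippet):

def CheckChangeOnUpload(input_api, output_api):
    # Non-blocking notice shown to the uploader instead of raw prints.
    return [output_api.PresubmitNotifyResult(
        'Before committing, please run Remoting browser_tests.\n'
        'Instructions: %s' % BROWSER_TEST_INSTRUCTIONS_LINK)]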
|
89f6654f921d2a373a3cbb0f521f2dbbb31f3de8
|
research/test_diff.py
|
research/test_diff.py
|
# use time() instead on unix
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
from sympycore import profile_expr
def time1(n=5):
from sympycore import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def time2(n=5):
from sympy import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
#print 'time2:',t2-t1
return 100 / (t2-t1)
def time3(n=5):
import swiginac
x,y,z = map(swiginac.symbol,'xyz')
f = (x / (1+swiginac.sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
print 'time3:',t2-t1
return 100 / (t2-t1)
def timing():
t1 = time1()
t2 = time3()
return t1, t2, t1/t2
print timing()
print timing()
print timing()
profile_expr('time1(5)')
|
Add diff bench test script.
|
Add diff bench test script.
|
Python
|
bsd-3-clause
|
pearu/sympycore,pearu/sympycore
|
Add diff bench test script.
|
# use time() instead on unix
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
from sympycore import profile_expr
def time1(n=5):
from sympycore import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def time2(n=5):
from sympy import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
#print 'time2:',t2-t1
return 100 / (t2-t1)
def time3(n=5):
import swiginac
x,y,z = map(swiginac.symbol,'xyz')
f = (x / (1+swiginac.sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
print 'time3:',t2-t1
return 100 / (t2-t1)
def timing():
t1 = time1()
t2 = time3()
return t1, t2, t1/t2
print timing()
print timing()
print timing()
profile_expr('time1(5)')
|
<commit_before><commit_msg>Add diff bench test script.<commit_after>
|
# use time() instead on unix
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
from sympycore import profile_expr
def time1(n=5):
from sympycore import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def time2(n=5):
from sympy import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
#print 'time2:',t2-t1
return 100 / (t2-t1)
def time3(n=5):
import swiginac
x,y,z = map(swiginac.symbol,'xyz')
f = (x / (1+swiginac.sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
print 'time3:',t2-t1
return 100 / (t2-t1)
def timing():
t1 = time1()
t2 = time3()
return t1, t2, t1/t2
print timing()
print timing()
print timing()
profile_expr('time1(5)')
|
Add diff bench test script.
# use time() instead on unix
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
from sympycore import profile_expr
def time1(n=5):
from sympycore import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def time2(n=5):
from sympy import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
#print 'time2:',t2-t1
return 100 / (t2-t1)
def time3(n=5):
import swiginac
x,y,z = map(swiginac.symbol,'xyz')
f = (x / (1+swiginac.sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
print 'time3:',t2-t1
return 100 / (t2-t1)
def timing():
t1 = time1()
t2 = time3()
return t1, t2, t1/t2
print timing()
print timing()
print timing()
profile_expr('time1(5)')
|
<commit_before><commit_msg>Add diff bench test script.<commit_after>
# use time() instead on unix
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
from sympycore import profile_expr
def time1(n=5):
from sympycore import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
return 100 / (t2-t1)
def time2(n=5):
from sympy import Symbol, sin
x,y,z = map(Symbol,'xyz')
f = (x / (1+sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
#print 'time2:',t2-t1
return 100 / (t2-t1)
def time3(n=5):
import swiginac
x,y,z = map(swiginac.symbol,'xyz')
f = (x / (1+swiginac.sin(x**(y+x**2)))**2)
t1 = clock()
while n:
f = f.diff(x)
n -= 1
t2 = clock()
print 'time3:',t2-t1
return 100 / (t2-t1)
def timing():
t1 = time1()
t2 = time3()
return t1, t2, t1/t2
print timing()
print timing()
print timing()
profile_expr('time1(5)')
|
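Note: the benchmark in this record falls back to time.clock on Windows, an API that was removed in Python 3.8. On current Python the portable choice is time.perf_counter, so a sketch of the first timing helper, keeping the same 100 / elapsed scoring convention, would be:

from time import perf_counter as clock  # monotonic, high resolution on every platform

def time_sympycore(n=5):
    from sympycore import Symbol, sin
    x, y, z = map(Symbol, 'xyz')
    f = x / (1 + sin(x ** (y + x ** 2))) ** 2
    t1 = clock()
    for _ in range(n):
        f = f.diff(x)
    t2 = clock()
    return 100 / (t2 - t1)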
|
7794500c352b0a2f536bbac7be07837d8fa0efef
|
tests/test_Projection.py
|
tests/test_Projection.py
|
import unittest
import numpy
import dadi
class ProjectionTestCase(unittest.TestCase):
def test_project_up(self):
"""
Projecting parameters upward.
"""
fixed_params = [0.1,None,None]
params_up = dadi.Inference._project_params_up([0.2,0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up([0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up(0.3, fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
suite = unittest.TestLoader().loadTestsFromTestCase(ProjectionTestCase)
unittest.main()
|
Add test for projecting parameters upward.
|
Add test for projecting parameters upward.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@489 979d6bd5-6d4d-0410-bece-f567c23bd345
|
Python
|
bsd-3-clause
|
RyanGutenkunst/dadi,niuhuifei/dadi,beni55/dadi,beni55/dadi,yangjl/dadi,cheese1213/dadi,ChenHsiang/dadi,cheese1213/dadi,paulirish/dadi,niuhuifei/dadi,paulirish/dadi,ChenHsiang/dadi,yangjl/dadi,RyanGutenkunst/dadi
|
Add test for projecting parameters upward.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@489 979d6bd5-6d4d-0410-bece-f567c23bd345
|
import unittest
import numpy
import dadi
class ProjectionTestCase(unittest.TestCase):
def test_project_up(self):
"""
Projecting parameters upward.
"""
fixed_params = [0.1,None,None]
params_up = dadi.Inference._project_params_up([0.2,0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up([0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up(0.3, fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
suite = unittest.TestLoader().loadTestsFromTestCase(ProjectionTestCase)
unittest.main()
|
<commit_before><commit_msg>Add test for projecting parameters upward.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@489 979d6bd5-6d4d-0410-bece-f567c23bd345<commit_after>
|
import unittest
import numpy
import dadi
class ProjectionTestCase(unittest.TestCase):
def test_project_up(self):
"""
Projecting parameters upward.
"""
fixed_params = [0.1,None,None]
params_up = dadi.Inference._project_params_up([0.2,0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up([0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up(0.3, fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
suite = unittest.TestLoader().loadTestsFromTestCase(ProjectionTestCase)
unittest.main()
|
Add test for projecting parameters upward.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@489 979d6bd5-6d4d-0410-bece-f567c23bd345import unittest
import numpy
import dadi
class ProjectionTestCase(unittest.TestCase):
def test_project_up(self):
"""
Projecting parameters upward.
"""
fixed_params = [0.1,None,None]
params_up = dadi.Inference._project_params_up([0.2,0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up([0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up(0.3, fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
suite = unittest.TestLoader().loadTestsFromTestCase(ProjectionTestCase)
unittest.main()
|
<commit_before><commit_msg>Add test for projecting parameters upward.
git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@489 979d6bd5-6d4d-0410-bece-f567c23bd345<commit_after>import unittest
import numpy
import dadi
class ProjectionTestCase(unittest.TestCase):
def test_project_up(self):
"""
Projecting parameters upward.
"""
fixed_params = [0.1,None,None]
params_up = dadi.Inference._project_params_up([0.2,0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up([0.3], fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
fixed_params = [0.1,0.2,None]
params_up = dadi.Inference._project_params_up(0.3, fixed_params)
self.assertTrue(numpy.allclose(params_up, [0.1,0.2,0.3]))
suite = unittest.TestLoader().loadTestsFromTestCase(ProjectionTestCase)
unittest.main()
|
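Note: in the test module in this record, the suite variable is built but never used and unittest.main() executes at import time. The conventional ending, shown only as a sketch, would guard it:

if __name__ == '__main__':
    unittest.main()  # discovers ProjectionTestCase in this module automatically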
|
c95d352e16a7638d037a831fcc55a9145f6c9162
|
tests/test_twr_search.py
|
tests/test_twr_search.py
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
from gi.repository import GObject
sys.path.append("..")
from twitter.twr_search import TwrSearch
from twitter.twr_account import TwrAccount
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
TwrAccount.set_secrets(consumer_key, consumer_secret,
access_key, access_secret)
def __phase1_failed_cb(search, info):
print '[FAILED] phase1: tweets-downloaded-failed, with %s' % info
loop.quit()
def __phase1_cb(search, info):
print '[OK] phase1: tweets-downloaded, count: %s' % len(info['statuses'])
loop.quit()
search = TwrSearch()
search.connect('tweets-downloaded', __phase1_cb)
search.connect('tweets-downloaded-failed', __phase1_failed_cb)
search.tweets('@tchx84', count=1)
loop = GObject.MainLoop()
loop.run()
|
Add TwrSearch class basic test
|
Add TwrSearch class basic test
|
Python
|
lgpl-2.1
|
tchx84/twitter-gobject
|
Add TwrSearch class basic test
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
from gi.repository import GObject
sys.path.append("..")
from twitter.twr_search import TwrSearch
from twitter.twr_account import TwrAccount
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
TwrAccount.set_secrets(consumer_key, consumer_secret,
access_key, access_secret)
def __phase1_failed_cb(search, info):
print '[FAILED] phase1: tweets-downloaded-failed, with %s' % info
loop.quit()
def __phase1_cb(search, info):
print '[OK] phase1: tweets-downloaded, count: %s' % len(info['statuses'])
loop.quit()
search = TwrSearch()
search.connect('tweets-downloaded', __phase1_cb)
search.connect('tweets-downloaded-failed', __phase1_failed_cb)
search.tweets('@tchx84', count=1)
loop = GObject.MainLoop()
loop.run()
|
<commit_before><commit_msg>Add TwrSearch class basic test<commit_after>
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
from gi.repository import GObject
sys.path.append("..")
from twitter.twr_search import TwrSearch
from twitter.twr_account import TwrAccount
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
TwrAccount.set_secrets(consumer_key, consumer_secret,
access_key, access_secret)
def __phase1_failed_cb(search, info):
print '[FAILED] phase1: tweets-downloaded-failed, with %s' % info
loop.quit()
def __phase1_cb(search, info):
print '[OK] phase1: tweets-downloaded, count: %s' % len(info['statuses'])
loop.quit()
search = TwrSearch()
search.connect('tweets-downloaded', __phase1_cb)
search.connect('tweets-downloaded-failed', __phase1_failed_cb)
search.tweets('@tchx84', count=1)
loop = GObject.MainLoop()
loop.run()
|
Add TwrSearch class basic test#!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
from gi.repository import GObject
sys.path.append("..")
from twitter.twr_search import TwrSearch
from twitter.twr_account import TwrAccount
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
TwrAccount.set_secrets(consumer_key, consumer_secret,
access_key, access_secret)
def __phase1_failed_cb(search, info):
print '[FAILED] phase1: tweets-downloaded-failed, with %s' % info
loop.quit()
def __phase1_cb(search, info):
print '[OK] phase1: tweets-downloaded, count: %s' % len(info['statuses'])
loop.quit()
search = TwrSearch()
search.connect('tweets-downloaded', __phase1_cb)
search.connect('tweets-downloaded-failed', __phase1_failed_cb)
search.tweets('@tchx84', count=1)
loop = GObject.MainLoop()
loop.run()
|
<commit_before><commit_msg>Add TwrSearch class basic test<commit_after>#!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
from gi.repository import GObject
sys.path.append("..")
from twitter.twr_search import TwrSearch
from twitter.twr_account import TwrAccount
consumer_key = ''
consumer_secret = ''
access_key = ''
access_secret = ''
TwrAccount.set_secrets(consumer_key, consumer_secret,
access_key, access_secret)
def __phase1_failed_cb(search, info):
print '[FAILED] phase1: tweets-downloaded-failed, with %s' % info
loop.quit()
def __phase1_cb(search, info):
print '[OK] phase1: tweets-downloaded, count: %s' % len(info['statuses'])
loop.quit()
search = TwrSearch()
search.connect('tweets-downloaded', __phase1_cb)
search.connect('tweets-downloaded-failed', __phase1_failed_cb)
search.tweets('@tchx84', count=1)
loop = GObject.MainLoop()
loop.run()
|
|
934a7d166eb74fcd2ee3eaef79b63fabd61dc45c
|
buildscripts/run_benchmarks.py
|
buildscripts/run_benchmarks.py
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import os
import shutil
from pathlib import Path
from utilities import SDC_Build_Utilities
def run_benchmarks(sdc_utils, module_list, num_threads_list):
os.chdir(str(sdc_utils.src_path.parent))
for module in module_list:
for num_threads in num_threads_list:
os.environ['NUMBA_NUM_THREADS'] = num_threads
sdc_utils.log_info(f'Run Intel SDC benchmarks on {num_threads} threads', separate=True)
run_command(f'python -W ignore -m sdc.runtests {module}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--python', default='3.7', choices=['3.6', '3.7', '3.8'],
help='Python version, default = 3.7')
parser.add_argument('--sdc-channel', default=None, help='Intel SDC channel')
parser.add_argument('--module-list', required=True, nargs='+', help='List of performance modules to test')
parser.add_argument('--num-threads-list', required=True, nargs='+',
help='List of values for NUMBA_NUM_THREADS env variable')
args = parser.parse_args()
sdc_utils = SDC_Build_Utilities(args.python, args.sdc_channel)
sdc_utils.log_info('Run Intel(R) SDC benchmarks', separate=True)
sdc_utils.log_info(sdc_utils.line_double)
sdc_utils.create_environment(['scipy', 'openpyxl', 'xlrd'])
sdc_utils.install_conda_package(['sdc'])
run_benchmarks(sdc_utils, args.module_list, args.num_threads_list)
|
Add simple script for benchmark execution
|
Add simple script for benchmark execution
|
Python
|
bsd-2-clause
|
IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat
|
Add simple script for benchmark execution
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import os
import shutil
from pathlib import Path
from utilities import SDC_Build_Utilities
def run_benchmarks(sdc_utils, module_list, num_threads_list):
os.chdir(str(sdc_utils.src_path.parent))
for module in module_list:
for num_threads in num_threads_list:
os.environ['NUMBA_NUM_THREADS'] = num_threads
sdc_utils.log_info(f'Run Intel SDC benchmarks on {num_threads} threads', separate=True)
run_command(f'python -W ignore -m sdc.runtests {module}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--python', default='3.7', choices=['3.6', '3.7', '3.8'],
help='Python version, default = 3.7')
parser.add_argument('--sdc-channel', default=None, help='Intel SDC channel')
parser.add_argument('--module-list', required=True, nargs='+', help='List of performance modules to test')
parser.add_argument('--num-threads-list', required=True, nargs='+',
help='List of values for NUMBA_NUM_THREADS env variable')
args = parser.parse_args()
sdc_utils = SDC_Build_Utilities(args.python, args.sdc_channel)
sdc_utils.log_info('Run Intel(R) SDC benchmarks', separate=True)
sdc_utils.log_info(sdc_utils.line_double)
sdc_utils.create_environment(['scipy', 'openpyxl', 'xlrd'])
sdc_utils.install_conda_package(['sdc'])
run_benchmarks(sdc_utils, args.module_list, args.num_threads_list)
|
<commit_before><commit_msg>Add simple script for benchmark execution<commit_after>
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import os
import shutil
from pathlib import Path
from utilities import SDC_Build_Utilities
def run_benchmarks(sdc_utils, module_list, num_threads_list):
os.chdir(str(sdc_utils.src_path.parent))
for module in module_list:
for num_threads in num_threads_list:
os.environ['NUMBA_NUM_THREADS'] = num_threads
sdc_utils.log_info(f'Run Intel SDC benchmarks on {num_threads} threads', separate=True)
run_command(f'python -W ignore -m sdc.runtests {module}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--python', default='3.7', choices=['3.6', '3.7', '3.8'],
help='Python version, default = 3.7')
parser.add_argument('--sdc-channel', default=None, help='Intel SDC channel')
parser.add_argument('--module-list', required=True, nargs='+', help='List of performance modules to test')
parser.add_argument('--num-threads-list', required=True, nargs='+',
help='List of values for NUMBA_NUM_THREADS env variable')
args = parser.parse_args()
sdc_utils = SDC_Build_Utilities(args.python, args.sdc_channel)
sdc_utils.log_info('Run Intel(R) SDC benchmarks', separate=True)
sdc_utils.log_info(sdc_utils.line_double)
sdc_utils.create_environment(['scipy', 'openpyxl', 'xlrd'])
sdc_utils.install_conda_package(['sdc'])
run_benchmarks(sdc_utils, args.module_list, args.num_threads_list)
|
Add simple script for benchmark execution# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import os
import shutil
from pathlib import Path
from utilities import SDC_Build_Utilities
def run_benchmarks(sdc_utils, module_list, num_threads_list):
os.chdir(str(sdc_utils.src_path.parent))
for module in module_list:
for num_threads in num_threads_list:
os.environ['NUMBA_NUM_THREADS'] = num_threads
sdc_utils.log_info(f'Run Intel SDC benchmarks on {num_threads} threads', separate=True)
run_command(f'python -W ignore -m sdc.runtests {module}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--python', default='3.7', choices=['3.6', '3.7', '3.8'],
help='Python version, default = 3.7')
parser.add_argument('--sdc-channel', default=None, help='Intel SDC channel')
parser.add_argument('--module-list', required=True, nargs='+', help='List of performance modules to test')
parser.add_argument('--num-threads-list', required=True, nargs='+',
help='List of values for NUMBA_NUM_THREADS env variable')
args = parser.parse_args()
sdc_utils = SDC_Build_Utilities(args.python, args.sdc_channel)
sdc_utils.log_info('Run Intel(R) SDC benchmarks', separate=True)
sdc_utils.log_info(sdc_utils.line_double)
sdc_utils.create_environment(['scipy', 'openpyxl', 'xlrd'])
sdc_utils.install_conda_package(['sdc'])
run_benchmarks(sdc_utils, args.module_list, args.num_threads_list)
|
<commit_before><commit_msg>Add simple script for benchmark execution<commit_after># *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import os
import shutil
from pathlib import Path
from utilities import SDC_Build_Utilities
def run_benchmarks(sdc_utils, module_list, num_threads_list):
os.chdir(str(sdc_utils.src_path.parent))
for module in module_list:
for num_threads in num_threads_list:
os.environ['NUMBA_NUM_THREADS'] = num_threads
sdc_utils.log_info(f'Run Intel SDC benchmarks on {num_threads} threads', separate=True)
run_command(f'python -W ignore -m sdc.runtests {module}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--python', default='3.7', choices=['3.6', '3.7', '3.8'],
help='Python version, default = 3.7')
parser.add_argument('--sdc-channel', default=None, help='Intel SDC channel')
parser.add_argument('--module-list', required=True, nargs='+', help='List of performance modules to test')
parser.add_argument('--num-threads-list', required=True, nargs='+',
help='List of values for NUMBA_NUM_THREADS env variable')
args = parser.parse_args()
sdc_utils = SDC_Build_Utilities(args.python, args.sdc_channel)
sdc_utils.log_info('Run Intel(R) SDC benchmarks', separate=True)
sdc_utils.log_info(sdc_utils.line_double)
sdc_utils.create_environment(['scipy', 'openpyxl', 'xlrd'])
sdc_utils.install_conda_package(['sdc'])
run_benchmarks(sdc_utils, args.module_list, args.num_threads_list)
|
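Note: run_benchmarks in this record calls run_command, which is neither imported nor defined in the file; it presumably lives alongside SDC_Build_Utilities in the utilities module, but that is an assumption. A minimal stand-in, offered only as a sketch, could be:

import subprocess

def run_command(command):
    """Run a shell command and raise if it exits with a non-zero status."""
    print(f'Executing: {command}')
    subprocess.run(command, shell=True, check=True)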
|
c0ba8348f614f2ef6c14db9335ba3d1a6f3d29af
|
p3/management/commands/create_bulk_coupons.py
|
p3/management/commands/create_bulk_coupons.py
|
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <csv-file>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupon %r created' % c.code)
|
Add new coupon script to generate coupons using a CSV file.
|
Add new coupon script to generate coupons using a CSV file.
|
Python
|
bsd-2-clause
|
EuroPython/epcon,EuroPython/epcon,EuroPython/epcon,EuroPython/epcon
|
Add new coupon script to generate coupons using a CSV file.
|
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <csv-file>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupon %r created' % c.code)
|
<commit_before><commit_msg>Add new coupon script to generate coupons using a CSV file.<commit_after>
|
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <csv-file>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupon %r created' % c.code)
|
Add new coupon script to generate coupons using a CSV file.
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <csv-file>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupon %r created' % c.code)
|
<commit_before><commit_msg>Add new coupon script to generate coupons using a CSV file.<commit_after>
""" Create a batch of single use discount coupons from a CSV file.
Parameters: <conference> <csv-file>
Creates coupons based on the CSV file contents:
code - coupon code
max_usage - max. number of uses
items_per_usage - max number of items per use
value - value of the coupon in percent
description - description
fares - comma separated list of included fares
Use --dry-run to test drive the script.
"""
import sys
import csv
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models as cmodels
from assopy.models import Coupon
###
class Command(BaseCommand):
args = '<conference> <csv-file>'
# Dry run ?
dry_run = False
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('conference')
parser.add_argument('csv')
# Named (optional) arguments
parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do everything except create the coupons')
@transaction.atomic
def handle(self, *args, **options):
conference = cmodels.Conference.objects.get(code=options['conference'])
self.dry_run = options.get('dry_run', False)
csv_filename = options['csv']
# Get set of existing coupon codes
all_codes = set(c['code'] for c in Coupon.objects\
.filter(conference=conference.code)\
.values('code'))
# Valid fares (conference fares only)
all_fares = cmodels.Fare.objects\
.filter(conference=conference.code)
# Create coupons
if csv_filename == 'stdin':
csv_file = sys.stdin
else:
csv_file = open(csv_filename)
with csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
code = row['code'].strip()
if not code:
# Skip lines without code
continue
if code in all_codes:
# Skip coupons which already exist
print ('Coupon %r already exists - skipping' % code)
continue
c = Coupon(conference=conference)
c.code = code
c.max_usage = int(row.get('max_usage', 1))
c.items_per_usage = int(row.get('items_per_usage', 1))
c.value = row['value']
c.description = row.get('description', '')
if not self.dry_run:
c.save()
c.fares = all_fares.filter(
code__in = [x.strip()
for x in row['fares'].split(',')])
print ('Coupon %r created' % c.code)
|
|
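A minimal sketch of feeding the coupon importer above: an input file with the columns its docstring lists, followed by a dry-run invocation. The coupon code, fare codes and the percentage format of the value column are assumptions for illustration only, not values from the record.

import csv

rows = [
    {'code': 'SPEAKER01', 'max_usage': 1, 'items_per_usage': 1,
     'value': '100%', 'description': 'Speaker ticket', 'fares': 'TRSC,TRSP'},
]
with open('coupons.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=['code', 'max_usage', 'items_per_usage',
                                           'value', 'description', 'fares'])
    writer.writeheader()
    writer.writerows(rows)

# then, from the project root:
#   python manage.py create_bulk_coupons <conference> coupons.csv --dry-run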
d4840c794f9d9de252028a7899d470753615d3d5
|
eve_neo4j/structures.py
|
eve_neo4j/structures.py
|
# -*- coding: utf-8 -*-
from eve_neo4j.utils import node_to_dict, count_selection
class Neo4jResultCollection(object):
"""
Collection of results. The object holds onto a py2neo-NodeSelection
object and serves a generator off it.
:param selection: NodeSelection object for the requested resource.
"""
def __init__(self, selection, **kwargs):
self._selection = selection
def __iter__(self):
for node in self._selection:
yield node_to_dict(node)
def count(self, with_limit_and_skip=False, **kwargs):
return count_selection(self._selection, with_limit_and_skip)
|
Create Node holder for Eve.
|
Create Node holder for Eve.
|
Python
|
mit
|
Grupo-Abraxas/eve-neo4j,Abraxas-Biosystems/eve-neo4j
|
Create Node holder for Eve.
|
# -*- coding: utf-8 -*-
from eve_neo4j.utils import node_to_dict, count_selection
class Neo4jResultCollection(object):
"""
Collection of results. The object holds onto a py2neo-NodeSelection
object and serves a generator off it.
:param selection: NodeSelection object for the requested resource.
"""
def __init__(self, selection, **kwargs):
self._selection = selection
def __iter__(self):
for node in self._selection:
yield node_to_dict(node)
def count(self, with_limit_and_skip=False, **kwargs):
return count_selection(self._selection, with_limit_and_skip)
|
<commit_before><commit_msg>Create Node holder for Eve.<commit_after>
|
# -*- coding: utf-8 -*-
from eve_neo4j.utils import node_to_dict, count_selection
class Neo4jResultCollection(object):
"""
Collection of results. The object holds onto a py2neo-NodeSelection
object and serves a generator off it.
:param selection: NodeSelection object for the requested resource.
"""
def __init__(self, selection, **kwargs):
self._selection = selection
def __iter__(self):
for node in self._selection:
yield node_to_dict(node)
def count(self, with_limit_and_skip=False, **kwargs):
return count_selection(self._selection, with_limit_and_skip)
|
Create Node holder for Eve.# -*- coding: utf-8 -*-
from eve_neo4j.utils import node_to_dict, count_selection
class Neo4jResultCollection(object):
"""
Collection of results. The object holds onto a py2neo-NodeSelection
object and serves a generator off it.
:param selection: NodeSelection object for the requested resource.
"""
def __init__(self, selection, **kwargs):
self._selection = selection
def __iter__(self):
for node in self._selection:
yield node_to_dict(node)
def count(self, with_limit_and_skip=False, **kwargs):
return count_selection(self._selection, with_limit_and_skip)
|
<commit_before><commit_msg>Create Node holder for Eve.<commit_after># -*- coding: utf-8 -*-
from eve_neo4j.utils import node_to_dict, count_selection
class Neo4jResultCollection(object):
"""
Collection of results. The object holds onto a py2neo-NodeSelection
object and serves a generator off it.
:param selection: NodeSelection object for the requested resource.
"""
def __init__(self, selection, **kwargs):
self._selection = selection
def __iter__(self):
for node in self._selection:
yield node_to_dict(node)
def count(self, with_limit_and_skip=False, **kwargs):
return count_selection(self._selection, with_limit_and_skip)
|
|
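A minimal sketch of wiring the collection class above to a py2neo selection and consuming it. This assumes a py2neo 3.x-style NodeSelector and a locally reachable Neo4j instance; the 'people' label is a placeholder.

from py2neo import Graph, NodeSelector
from eve_neo4j.structures import Neo4jResultCollection

graph = Graph()                                   # connection settings depend on the local setup
selection = NodeSelector(graph).select('people')  # a NodeSelection over nodes labelled 'people'

collection = Neo4jResultCollection(selection)
print(collection.count())                         # delegates to count_selection()
for doc in collection:                            # each node is rendered as a dict by node_to_dict()
    print(doc)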
d9bcab7e07ef4d9c44e0dfa0bcf49510b954ae71
|
examples/test_mirror.py
|
examples/test_mirror.py
|
import pypiv
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
def main():
imgs = ['images/finger1.png', 'images/finger2.png']
frames = [plt.imread(x) for x in imgs]
frame_a, frame_b = frames[0], frames[1]
frame_a_inv = np.copy(frame_a[::-1,::-1])
frame_b_inv = np.copy(frame_b[::-1,::-1])
piv = do_piv(frame_a, frame_b)
piv_inv = do_piv(frame_a_inv, frame_b_inv)
u = piv.u
ui = piv_inv.u[::-1, ::-1]
u[np.isnan(u)] = 1000.
ui[np.isnan(ui)] = -1000.
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(u, interpolation='nearest')
ax2.imshow(-ui, interpolation='nearest')
print np.sum(u+ui)
plt.show()
def do_piv(frame_a, frame_b):
#PIV1
piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV2
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV3
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=8)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
return piv
if __name__=='__main__':
main()
|
Add example for inverted image comparison.
|
Add example for inverted image comparison.
An additional example which tests if
a rotation of the test images along both axes still
yields the same results after a PIV evaluation.
Please note that this works, except for the outlier detection
which is probably due to an asymmetry in the
CloughTocherInterpolator function from scipy.interpolate.
|
Python
|
bsd-3-clause
|
jr7/pypiv
|
Add example for inverted image comparison.
An additional example which tests if
a rotation of the test images along both axes still
yields the same results after a PIV evaluation.
Please note that this works, except for the outlier detection
which is probably due to an asymmetry in the
CloughTocherInterpolator function from scipy.interpolate.
|
import pypiv
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
def main():
imgs = ['images/finger1.png', 'images/finger2.png']
frames = [plt.imread(x) for x in imgs]
frame_a, frame_b = frames[0], frames[1]
frame_a_inv = np.copy(frame_a[::-1,::-1])
frame_b_inv = np.copy(frame_b[::-1,::-1])
piv = do_piv(frame_a, frame_b)
piv_inv = do_piv(frame_a_inv, frame_b_inv)
u = piv.u
ui = piv_inv.u[::-1, ::-1]
u[np.isnan(u)] = 1000.
ui[np.isnan(ui)] = -1000.
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(u, interpolation='nearest')
ax2.imshow(-ui, interpolation='nearest')
print np.sum(u+ui)
plt.show()
def do_piv(frame_a, frame_b):
#PIV1
piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV2
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV3
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=8)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
return piv
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Add example for inverted image comparison.
An additional example which tests if
a rotation of the test images along both axes still
yields the same results after a PIV evaluation.
Please note that this works, except for the outlier detection
which is probably due to an asymmetry in the
CloughTocherInterpolator function from scipy.interpolate.<commit_after>
|
import pypiv
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
def main():
imgs = ['images/finger1.png', 'images/finger2.png']
frames = [plt.imread(x) for x in imgs]
frame_a, frame_b = frames[0], frames[1]
frame_a_inv = np.copy(frame_a[::-1,::-1])
frame_b_inv = np.copy(frame_b[::-1,::-1])
piv = do_piv(frame_a, frame_b)
piv_inv = do_piv(frame_a_inv, frame_b_inv)
u = piv.u
ui = piv_inv.u[::-1, ::-1]
u[np.isnan(u)] = 1000.
ui[np.isnan(ui)] = -1000.
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(u, interpolation='nearest')
ax2.imshow(-ui, interpolation='nearest')
print np.sum(u+ui)
plt.show()
def do_piv(frame_a, frame_b):
#PIV1
piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV2
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV3
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=8)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
return piv
if __name__=='__main__':
main()
|
Add example for inverted image comparison.
An additional example which tests if
a rotation of the test images along both axes still
yields the same results after a PIV evaluation.
Please note that this works, except for the outlier detection
which is probably due to an asymmetry in the
CloughTocherInterpolator function from scipy.interpolate.import pypiv
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
def main():
imgs = ['images/finger1.png', 'images/finger2.png']
frames = [plt.imread(x) for x in imgs]
frame_a, frame_b = frames[0], frames[1]
frame_a_inv = np.copy(frame_a[::-1,::-1])
frame_b_inv = np.copy(frame_b[::-1,::-1])
piv = do_piv(frame_a, frame_b)
piv_inv = do_piv(frame_a_inv, frame_b_inv)
u = piv.u
ui = piv_inv.u[::-1, ::-1]
u[np.isnan(u)] = 1000.
ui[np.isnan(ui)] = -1000.
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(u, interpolation='nearest')
ax2.imshow(-ui, interpolation='nearest')
print np.sum(u+ui)
plt.show()
def do_piv(frame_a, frame_b):
#PIV1
piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV2
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV3
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=8)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
return piv
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Add example for inverted image comparison.
An additional example which tests if
a rotation of the test images along both axes still
yields the same results after a PIV evaluation.
Please note that this works, except for the outlier detection
which is probably due to an asymmetry in the
CloughTocherInterpolator function from scipy.interpolate.<commit_after>import pypiv
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
def main():
imgs = ['images/finger1.png', 'images/finger2.png']
frames = [plt.imread(x) for x in imgs]
frame_a, frame_b = frames[0], frames[1]
frame_a_inv = np.copy(frame_a[::-1,::-1])
frame_b_inv = np.copy(frame_b[::-1,::-1])
piv = do_piv(frame_a, frame_b)
piv_inv = do_piv(frame_a_inv, frame_b_inv)
u = piv.u
ui = piv_inv.u[::-1, ::-1]
u[np.isnan(u)] = 1000.
ui[np.isnan(ui)] = -1000.
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(u, interpolation='nearest')
ax2.imshow(-ui, interpolation='nearest')
print np.sum(u+ui)
plt.show()
def do_piv(frame_a, frame_b):
#PIV1
piv = pypiv.DirectPIV(frame_a, frame_b, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV2
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=16)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
#PIV3
piv = pypiv.AdaptivePIV(piv, window_size=32,
search_size=32, distance=8)
piv.correlate_frames()
#pypiv.filters.outlier_from_local_median(piv, 2.0)
#pypiv.filters.replace_outliers(piv)
pypiv.filters.median_filter(piv)
return piv
if __name__=='__main__':
main()
|
|
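A numpy-only sketch isolating the symmetry check the example relies on: evaluating both frames flipped along both axes should give the original displacement field with its orientation and sign reversed, so flipping the mirrored result back and summing with the original cancels out. Synthetic data only, no PIV involved.

import numpy as np

u = np.random.rand(16, 16) - 0.5    # stand-in for piv.u from the example above
u_inv = -u[::-1, ::-1]              # ideal result of evaluating the mirrored frames
ui = u_inv[::-1, ::-1]              # flip back, as the example does with piv_inv.u
print(np.sum(u + ui))               # exactly 0 here; only approximately 0 for real PIV output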
f8b2760d1bd218a7be5abfdb489f6afe53d85277
|
tools/setup_server_binding.py
|
tools/setup_server_binding.py
|
# Useful to launch an interactive debugging session in ipython with %ed or %run
from nxdrive.controller import Controller
from nxdrive.model import ServerBinding
c = Controller('~/.nuxeo-drive')
s = c.get_session()
sb = s.query(ServerBinding).one()
|
Add useful python script to launch an interactive debugging session in ipython
|
Add useful python script to launch an interactive debugging session in ipython
|
Python
|
lgpl-2.1
|
arameshkumar/nuxeo-drive,IsaacYangSLA/nuxeo-drive,IsaacYangSLA/nuxeo-drive,rsoumyassdi/nuxeo-drive,rsoumyassdi/nuxeo-drive,loopingz/nuxeo-drive,loopingz/nuxeo-drive,loopingz/nuxeo-drive,loopingz/nuxeo-drive,rsoumyassdi/nuxeo-drive,rsoumyassdi/nuxeo-drive,IsaacYangSLA/nuxeo-drive,DirkHoffmann/nuxeo-drive,ssdi-drive/nuxeo-drive,arameshkumar/base-nuxeo-drive,DirkHoffmann/nuxeo-drive,DirkHoffmann/nuxeo-drive,arameshkumar/base-nuxeo-drive,DirkHoffmann/nuxeo-drive,arameshkumar/nuxeo-drive,IsaacYangSLA/nuxeo-drive,ssdi-drive/nuxeo-drive,arameshkumar/base-nuxeo-drive,IsaacYangSLA/nuxeo-drive,ssdi-drive/nuxeo-drive,arameshkumar/nuxeo-drive,arameshkumar/nuxeo-drive,loopingz/nuxeo-drive,DirkHoffmann/nuxeo-drive,arameshkumar/base-nuxeo-drive
|
Add useful python script to launch an interactive debugging session in ipython
|
# Useful to launch an interactive debugging session in ipython with %ed or %run
from nxdrive.controller import Controller
from nxdrive.model import ServerBinding
c = Controller('~/.nuxeo-drive')
s = c.get_session()
sb = s.query(ServerBinding).one()
|
<commit_before><commit_msg>Add useful python script to launch an interactive debugging session in ipython<commit_after>
|
# Useful to launch an interactive debugging session in ipython with %ed or %run
from nxdrive.controller import Controller
from nxdrive.model import ServerBinding
c = Controller('~/.nuxeo-drive')
s = c.get_session()
sb = s.query(ServerBinding).one()
|
Add useful python script to launch an interactive debugging session in ipython# Useful to launch an interactive debugging session in ipython with %ed or %run
from nxdrive.controller import Controller
from nxdrive.model import ServerBinding
c = Controller('~/.nuxeo-drive')
s = c.get_session()
sb = s.query(ServerBinding).one()
|
<commit_before><commit_msg>Add useful python script to launch an interactive debugging session in ipython<commit_after># Useful to launch an interactive debugging session in ipython with %ed or %run
from nxdrive.controller import Controller
from nxdrive.model import ServerBinding
c = Controller('~/.nuxeo-drive')
s = c.get_session()
sb = s.query(ServerBinding).one()
|
|
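A hedged usage note for the debugging helper above: it is meant to be pulled into an interactive session (for example `ipython -i tools/setup_server_binding.py`, or `%run tools/setup_server_binding.py` from inside IPython), after which the controller, session and binding are available for ad-hoc queries. The count() query below is only an illustration of what the session object can be used for.

from nxdrive.controller import Controller
from nxdrive.model import ServerBinding

c = Controller('~/.nuxeo-drive')
s = c.get_session()
print(s.query(ServerBinding).count())   # e.g. how many server bindings exist locally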
1ff3735aec0473009c82444d024804fd1579a98f
|
solutions/p7.py
|
solutions/p7.py
|
# 10001st prime
# Find the 10001st prime number
#
import math
def generate_primes(n):
p = 2
primes = [p]
p += 1
primes.append(p)
while len(primes) != n:
p += 2
test_prime = True
# Limit should only be up to the square root of current p, because nothing will exceed that.
sqrt_limit = math.sqrt(p)
for num in primes:
if num > sqrt_limit:
break
if p % num == 0:
test_prime = False
break
if test_prime:
primes.append(p)
return primes
primes = generate_primes(10001)
print primes[-1]
#print primes[10001]
|
Add Solution to Problem 7.
|
Add Solution to Problem 7.
|
Python
|
unlicense
|
iheanyi/ProjectEuler
|
Add Solution to Problem 7.
|
# 10001st prime
# Find the 10001st prime number
#
import math
def generate_primes(n):
p = 2
primes = [p]
p += 1
primes.append(p)
while len(primes) != n:
p += 2
test_prime = True
# Limit should only be up to the square root of current p, because nothing will exceed that.
sqrt_limit = math.sqrt(p)
for num in primes:
if num > sqrt_limit:
break
if p % num == 0:
test_prime = False
break
if test_prime:
primes.append(p)
return primes
primes = generate_primes(10001)
print primes[-1]
#print primes[10001]
|
<commit_before><commit_msg>Add Solution to Problem 7.<commit_after>
|
# 10001st prime
# Find the 10001st prime number
#
import math
def generate_primes(n):
p = 2
primes = [p]
p += 1
primes.append(p)
while len(primes) != n:
p += 2
test_prime = True
# Limit should only be up to the square root of current p, because nothing will exceed that.
sqrt_limit = math.sqrt(p)
for num in primes:
if num > sqrt_limit:
break
if p % num == 0:
test_prime = False
break
if test_prime:
primes.append(p)
return primes
primes = generate_primes(10001)
print primes[-1]
#print primes[10001]
|
Add Solution to Problem 7.# 10001st prime
# Find the 10001st prime number
#
import math
def generate_primes(n):
p = 2
primes = [p]
p += 1
primes.append(p)
while len(primes) != n:
p += 2
test_prime = True
# Limit should only be up to the square root of current p, because nothing will exceed that.
sqrt_limit = math.sqrt(p)
for num in primes:
if num > sqrt_limit:
break
if p % num == 0:
test_prime = False
break
if test_prime:
primes.append(p)
return primes
primes = generate_primes(10001)
print primes[-1]
#print primes[10001]
|
<commit_before><commit_msg>Add Solution to Problem 7.<commit_after># 10001st prime
# Find the 10001st prime number
#
import math
def generate_primes(n):
p = 2
primes = [p]
p += 1
primes.append(p)
while len(primes) != n:
p += 2
test_prime = True
# Limit should only be up to the square root of current p, because nothing will exceed that.
sqrt_limit = math.sqrt(p)
for num in primes:
if num > sqrt_limit:
break
if p % num == 0:
test_prime = False
break
if test_prime:
primes.append(p)
return primes
primes = generate_primes(10001)
print primes[-1]
#print primes[10001]
|
|
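A quick, self-contained sanity check for generate_primes() using plain trial division (Python 3 syntax; the expected 10001st prime in the final comment is the well-known Project Euler 7 result and worth re-verifying locally).

def is_prime(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def first_primes(count):
    found, candidate = [], 2
    while len(found) < count:
        if is_prime(candidate):
            found.append(candidate)
        candidate += 1
    return found

assert first_primes(10) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# generate_primes(10001)[-1] should agree with first_primes(10001)[-1] (expected 104743).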
a3352bb43c0c9b696e09fbb3dfed2c70e9bb5f12
|
replace_imports.py
|
replace_imports.py
|
from __future__ import with_statement
import os
file_contents = {}
file_imports = {}
def get_file(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_contents.keys():
print(file_name)
try:
with open(file_name, 'r', encoding='UTF-8') as f:
file_contents[file_name] = f.read()
except TypeError:
with open(file_name, 'r') as f:
file_contents[file_name] = f.read()
return file_contents[file_name]
def get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_imports.keys():
lines = get_file(file_name).split('\n')
import_lines = [i.strip('. ') for i in lines if
i.strip()[:len('Require ')] == 'Require ' or
i.strip()[:len('Import ')] == 'Import ']
imports = set((' ' + ' '.join(import_lines)).replace(' Require ', ' ').replace(' Import ', ' ').replace(' Export ', ' ').strip().split(' '))
file_imports[file_name] = tuple(sorted(imports))
return file_imports[file_name]
def merge_imports(*imports):
rtn = []
for import_list in imports:
for i in import_list:
if i not in rtn:
rtn.append(i)
return rtn
def recursively_get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if os.path.exists(file_name):
imports = get_imports(file_name)
imports_list = [recursively_get_imports(i) for i in imports]
return merge_imports(*imports_list) + [file_name[:-2]]
return [file_name[:-2]]
def contents_without_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
contents = get_file(file_name)
lines = [i for i in contents.split('\n') if
i.strip()[:len('Require ')] != 'Require ' and
i.strip()[:len('Import ')] != 'Import ']
return '\n'.join(lines)
def include_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
all_imports = recursively_get_imports(file_name)
remaining_imports = []
rtn = ''
for import_name in all_imports:
if os.path.exists(import_name + '.v'):
rtn += contents_without_imports(import_name)
else:
remaining_imports.append(import_name)
rtn = 'Require Import %s.\n%s' % (' '.join(remaining_imports), rtn)
return rtn
|
Add a python script for combining imports
|
Add a python script for combining imports
|
Python
|
mit
|
JasonGross/coq-tools,JasonGross/coq-tools
|
Add a python script for combining imports
|
from __future__ import with_statement
import os
file_contents = {}
file_imports = {}
def get_file(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_contents.keys():
print(file_name)
try:
with open(file_name, 'r', encoding='UTF-8') as f:
file_contents[file_name] = f.read()
except TypeError:
with open(file_name, 'r') as f:
file_contents[file_name] = f.read()
return file_contents[file_name]
def get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_imports.keys():
lines = get_file(file_name).split('\n')
import_lines = [i.strip('. ') for i in lines if
i.strip()[:len('Require ')] == 'Require ' or
i.strip()[:len('Import ')] == 'Import ']
imports = set((' ' + ' '.join(import_lines)).replace(' Require ', ' ').replace(' Import ', ' ').replace(' Export ', ' ').strip().split(' '))
file_imports[file_name] = tuple(sorted(imports))
return file_imports[file_name]
def merge_imports(*imports):
rtn = []
for import_list in imports:
for i in import_list:
if i not in rtn:
rtn.append(i)
return rtn
def recursively_get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if os.path.exists(file_name):
imports = get_imports(file_name)
imports_list = [recursively_get_imports(i) for i in imports]
return merge_imports(*imports_list) + [file_name[:-2]]
return [file_name[:-2]]
def contents_without_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
contents = get_file(file_name)
lines = [i for i in contents.split('\n') if
i.strip()[:len('Require ')] != 'Require ' and
i.strip()[:len('Import ')] != 'Import ']
return '\n'.join(lines)
def include_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
all_imports = recursively_get_imports(file_name)
remaining_imports = []
rtn = ''
for import_name in all_imports:
if os.path.exists(import_name + '.v'):
rtn += contents_without_imports(import_name)
else:
remaining_imports.append(import_name)
rtn = 'Require Import %s.\n%s' % (' '.join(remaining_imports), rtn)
return rtn
|
<commit_before><commit_msg>Add a python script for combining imports<commit_after>
|
from __future__ import with_statement
import os
file_contents = {}
file_imports = {}
def get_file(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_contents.keys():
print(file_name)
try:
with open(file_name, 'r', encoding='UTF-8') as f:
file_contents[file_name] = f.read()
except TypeError:
with open(file_name, 'r') as f:
file_contents[file_name] = f.read()
return file_contents[file_name]
def get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_imports.keys():
lines = get_file(file_name).split('\n')
import_lines = [i.strip('. ') for i in lines if
i.strip()[:len('Require ')] == 'Require ' or
i.strip()[:len('Import ')] == 'Import ']
imports = set((' ' + ' '.join(import_lines)).replace(' Require ', ' ').replace(' Import ', ' ').replace(' Export ', ' ').strip().split(' '))
file_imports[file_name] = tuple(sorted(imports))
return file_imports[file_name]
def merge_imports(*imports):
rtn = []
for import_list in imports:
for i in import_list:
if i not in rtn:
rtn.append(i)
return rtn
def recursively_get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if os.path.exists(file_name):
imports = get_imports(file_name)
imports_list = [recursively_get_imports(i) for i in imports]
return merge_imports(*imports_list) + [file_name[:-2]]
return [file_name[:-2]]
def contents_without_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
contents = get_file(file_name)
lines = [i for i in contents.split('\n') if
i.strip()[:len('Require ')] != 'Require ' and
i.strip()[:len('Import ')] != 'Import ']
return '\n'.join(lines)
def include_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
all_imports = recursively_get_imports(file_name)
remaining_imports = []
rtn = ''
for import_name in all_imports:
if os.path.exists(import_name + '.v'):
rtn += contents_without_imports(import_name)
else:
remaining_imports.append(import_name)
rtn = 'Require Import %s.\n%s' % (' '.join(remaining_imports), rtn)
return rtn
|
Add a python script for combining importsfrom __future__ import with_statement
import os
file_contents = {}
file_imports = {}
def get_file(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_contents.keys():
print(file_name)
try:
with open(file_name, 'r', encoding='UTF-8') as f:
file_contents[file_name] = f.read()
except TypeError:
with open(file_name, 'r') as f:
file_contents[file_name] = f.read()
return file_contents[file_name]
def get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_imports.keys():
lines = get_file(file_name).split('\n')
import_lines = [i.strip('. ') for i in lines if
i.strip()[:len('Require ')] == 'Require ' or
i.strip()[:len('Import ')] == 'Import ']
imports = set((' ' + ' '.join(import_lines)).replace(' Require ', ' ').replace(' Import ', ' ').replace(' Export ', ' ').strip().split(' '))
file_imports[file_name] = tuple(sorted(imports))
return file_imports[file_name]
def merge_imports(*imports):
rtn = []
for import_list in imports:
for i in import_list:
if i not in rtn:
rtn.append(i)
return rtn
def recursively_get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if os.path.exists(file_name):
imports = get_imports(file_name)
imports_list = [recursively_get_imports(i) for i in imports]
return merge_imports(*imports_list) + [file_name[:-2]]
return [file_name[:-2]]
def contents_without_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
contents = get_file(file_name)
lines = [i for i in contents.split('\n') if
i.strip()[:len('Require ')] != 'Require ' and
i.strip()[:len('Import ')] != 'Import ']
return '\n'.join(lines)
def include_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
all_imports = recursively_get_imports(file_name)
remaining_imports = []
rtn = ''
for import_name in all_imports:
if os.path.exists(import_name + '.v'):
rtn += contents_without_imports(import_name)
else:
remaining_imports.append(import_name)
rtn = 'Require Import %s.\n%s' % (' '.join(remaining_imports), rtn)
return rtn
|
<commit_before><commit_msg>Add a python script for combining imports<commit_after>from __future__ import with_statement
import os
file_contents = {}
file_imports = {}
def get_file(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_contents.keys():
print(file_name)
try:
with open(file_name, 'r', encoding='UTF-8') as f:
file_contents[file_name] = f.read()
except TypeError:
with open(file_name, 'r') as f:
file_contents[file_name] = f.read()
return file_contents[file_name]
def get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if file_name not in file_imports.keys():
lines = get_file(file_name).split('\n')
import_lines = [i.strip('. ') for i in lines if
i.strip()[:len('Require ')] == 'Require ' or
i.strip()[:len('Import ')] == 'Import ']
imports = set((' ' + ' '.join(import_lines)).replace(' Require ', ' ').replace(' Import ', ' ').replace(' Export ', ' ').strip().split(' '))
file_imports[file_name] = tuple(sorted(imports))
return file_imports[file_name]
def merge_imports(*imports):
rtn = []
for import_list in imports:
for i in import_list:
if i not in rtn:
rtn.append(i)
return rtn
def recursively_get_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
if os.path.exists(file_name):
imports = get_imports(file_name)
imports_list = [recursively_get_imports(i) for i in imports]
return merge_imports(*imports_list) + [file_name[:-2]]
return [file_name[:-2]]
def contents_without_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
contents = get_file(file_name)
lines = [i for i in contents.split('\n') if
i.strip()[:len('Require ')] != 'Require ' and
i.strip()[:len('Import ')] != 'Import ']
return '\n'.join(lines)
def include_imports(file_name):
if file_name[-2:] != '.v': file_name += '.v'
all_imports = recursively_get_imports(file_name)
remaining_imports = []
rtn = ''
for import_name in all_imports:
if os.path.exists(import_name + '.v'):
rtn += contents_without_imports(import_name)
else:
remaining_imports.append(import_name)
rtn = 'Require Import %s.\n%s' % (' '.join(remaining_imports), rtn)
return rtn
|
|
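A minimal sketch of how the module above is meant to be driven: inline every transitively Required/Imported file that exists on disk into one self-contained Coq source. The 'Main' name and the output path are placeholders; lookups are relative to the current working directory.

from replace_imports import include_imports

combined = include_imports('Main')            # walks Main.v and everything it Requires/Imports
with open('Main_selfcontained.v', 'w') as out:
    out.write(combined)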
3ea10a12026341f692e0bb6abfebdb1b96aa41b2
|
examples/list-public-addresses.py
|
examples/list-public-addresses.py
|
import openshift3.endpoints as endpoints
import openshift3.resources as resources
client = endpoints.Client()
projects = client.oapi.v1.projects.get()
#print(projects)
#print(resources.dumps(projects, indent=4, sort_keys=True))
#print()
def public_address(route):
host = route.spec.host
path = route.spec.path or '/'
if route.spec.tls:
return 'https://%s%s' % (host, path)
return 'http://%s%s' % (host, path)
for project in projects.items:
namespace = project.metadata.name
print('namespace=%r' % namespace)
routes = client.oapi.v1.namespaces(namespace=namespace).routes.get()
for route in routes.items:
print(' route=%r' % public_address(route))
#print()
#print(resources.dumps(routes, indent=4, sort_keys=True))
|
Add example of listing exposed URLs for services.
|
Add example of listing exposed URLs for services.
|
Python
|
bsd-2-clause
|
getwarped/powershift
|
Add example of listing exposed URLs for services.
|
import openshift3.endpoints as endpoints
import openshift3.resources as resources
client = endpoints.Client()
projects = client.oapi.v1.projects.get()
#print(projects)
#print(resources.dumps(projects, indent=4, sort_keys=True))
#print()
def public_address(route):
host = route.spec.host
path = route.spec.path or '/'
if route.spec.tls:
return 'https://%s%s' % (host, path)
return 'http://%s%s' % (host, path)
for project in projects.items:
namespace = project.metadata.name
print('namespace=%r' % namespace)
routes = client.oapi.v1.namespaces(namespace=namespace).routes.get()
for route in routes.items:
print(' route=%r' % public_address(route))
#print()
#print(resources.dumps(routes, indent=4, sort_keys=True))
|
<commit_before><commit_msg>Add example of listing exposed URLs for services.<commit_after>
|
import openshift3.endpoints as endpoints
import openshift3.resources as resources
client = endpoints.Client()
projects = client.oapi.v1.projects.get()
#print(projects)
#print(resources.dumps(projects, indent=4, sort_keys=True))
#print()
def public_address(route):
host = route.spec.host
path = route.spec.path or '/'
if route.spec.tls:
return 'https://%s%s' % (host, path)
return 'http://%s%s' % (host, path)
for project in projects.items:
namespace = project.metadata.name
print('namespace=%r' % namespace)
routes = client.oapi.v1.namespaces(namespace=namespace).routes.get()
for route in routes.items:
print(' route=%r' % public_address(route))
#print()
#print(resources.dumps(routes, indent=4, sort_keys=True))
|
Add example of listing exposed URLs for services.import openshift3.endpoints as endpoints
import openshift3.resources as resources
client = endpoints.Client()
projects = client.oapi.v1.projects.get()
#print(projects)
#print(resources.dumps(projects, indent=4, sort_keys=True))
#print()
def public_address(route):
host = route.spec.host
path = route.spec.path or '/'
if route.spec.tls:
return 'https://%s%s' % (host, path)
return 'http://%s%s' % (host, path)
for project in projects.items:
namespace = project.metadata.name
print('namespace=%r' % namespace)
routes = client.oapi.v1.namespaces(namespace=namespace).routes.get()
for route in routes.items:
print(' route=%r' % public_address(route))
#print()
#print(resources.dumps(routes, indent=4, sort_keys=True))
|
<commit_before><commit_msg>Add example of listing exposed URLs for services.<commit_after>import openshift3.endpoints as endpoints
import openshift3.resources as resources
client = endpoints.Client()
projects = client.oapi.v1.projects.get()
#print(projects)
#print(resources.dumps(projects, indent=4, sort_keys=True))
#print()
def public_address(route):
host = route.spec.host
path = route.spec.path or '/'
if route.spec.tls:
return 'https://%s%s' % (host, path)
return 'http://%s%s' % (host, path)
for project in projects.items:
namespace = project.metadata.name
print('namespace=%r' % namespace)
routes = client.oapi.v1.namespaces(namespace=namespace).routes.get()
for route in routes.items:
print(' route=%r' % public_address(route))
#print()
#print(resources.dumps(routes, indent=4, sort_keys=True))
|
|
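A small sketch of the URL-formatting rule from the example, restated with the scheme split out and exercised against fake route objects so it can be tested without a cluster (SimpleNamespace only mimics the attribute access the helper needs).

from types import SimpleNamespace

def public_address(route):
    host = route.spec.host
    path = route.spec.path or '/'
    scheme = 'https' if route.spec.tls else 'http'
    return '%s://%s%s' % (scheme, host, path)

tls_route = SimpleNamespace(spec=SimpleNamespace(host='app.example.com', path=None, tls=object()))
plain_route = SimpleNamespace(spec=SimpleNamespace(host='app.example.com', path='/api', tls=None))
assert public_address(tls_route) == 'https://app.example.com/'
assert public_address(plain_route) == 'http://app.example.com/api'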
600432490f78e1ed74ea334a5a46a30edd384a11
|
process_test.py
|
process_test.py
|
import argparse, time
from pycompss.api.task import task
from pycompss.api.parameter import *
#class process_test:
#
# def __init__(self):
# self.ready = True
@task(x = IN)
def main(x):
print time.time(), x
y = range(1)
#pt = process_test()
map(main, y)
|
Test process for running on COMPSs
|
Test process for running on COMPSs
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq
|
Test process for running on COMPSs
|
import argparse, time
from pycompss.api.task import task
from pycompss.api.parameter import *
#class process_test:
#
# def __init__(self):
# self.ready = True
@task(x = IN)
def main(x):
print time.time(), x
y = range(1)
#pt = process_test()
map(main, y)
|
<commit_before><commit_msg>Test process for running on COMPSs<commit_after>
|
import argparse, time
from pycompss.api.task import task
from pycompss.api.parameter import *
#class process_test:
#
# def __init__(self):
# self.ready = True
@task(x = IN)
def main(x):
print time.time(), x
y = range(1)
#pt = process_test()
map(main, y)
|
Test process for running on COMPSsimport argparse, time
from pycompss.api.task import task
from pycompss.api.parameter import *
#class process_test:
#
# def __init__(self):
# self.ready = True
@task(x = IN)
def main(x):
print time.time(), x
y = range(1)
#pt = process_test()
map(main, y)
|
<commit_before><commit_msg>Test process for running on COMPSs<commit_after>import argparse, time
from pycompss.api.task import task
from pycompss.api.parameter import *
#class process_test:
#
# def __init__(self):
# self.ready = True
@task(x = IN)
def main(x):
print time.time(), x
y = range(1)
#pt = process_test()
map(main, y)
|
|
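A hedged sketch alongside the COMPSs test above: a fallback pattern that lets the same task run as plain Python when no PyCOMPSs runtime is installed, which is convenient for a local smoke test before submitting through runcompss. The import guard is illustrative and not part of the record.

import time

try:
    from pycompss.api.task import task
    from pycompss.api.parameter import IN
except ImportError:          # no COMPSs runtime available locally
    IN = None
    def task(**kwargs):      # no-op decorator fallback
        def wrap(fn):
            return fn
        return wrap

@task(x=IN)
def main(x):
    print(time.time(), x)

for y in range(4):
    main(y)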
af566e0cd0958dadd0de55d51b50ad026a8f2b99
|
build_android_prepare.py
|
build_android_prepare.py
|
"""Configuration for the Caffe2 installation.
"""
from build import Config
import sys
Config.USE_SYSTEM_PROTOBUF = False
Config.PROTOC_BINARY = 'gen/third_party/google/protoc'
Config.USE_OPENMP = False
if __name__ == '__main__':
from brewtool.brewery import Brewery
Brewery.Run(
Config,
['build_android_prepare.py',
'build', '//third_party/google:protoc'])
else:
print('This script is not intended to be used as an imported module.')
sys.exit(1)
|
Add a simple script to help build android.
|
Add a simple script to help build android.
|
Python
|
apache-2.0
|
davinwang/caffe2,sf-wind/caffe2,davinwang/caffe2,bwasti/caffe2,pietern/caffe2,xzturn/caffe2,davinwang/caffe2,sf-wind/caffe2,bwasti/caffe2,davinwang/caffe2,bwasti/caffe2,bwasti/caffe2,sf-wind/caffe2,caffe2/caffe2,pietern/caffe2,pietern/caffe2,Yangqing/caffe2,xzturn/caffe2,Yangqing/caffe2,davinwang/caffe2,Yangqing/caffe2,xzturn/caffe2,bwasti/caffe2,sf-wind/caffe2,Yangqing/caffe2,xzturn/caffe2,pietern/caffe2,xzturn/caffe2,sf-wind/caffe2,Yangqing/caffe2,pietern/caffe2
|
Add a simple script to help build android.
|
"""Configuration for the Caffe2 installation.
"""
from build import Config
import sys
Config.USE_SYSTEM_PROTOBUF = False
Config.PROTOC_BINARY = 'gen/third_party/google/protoc'
Config.USE_OPENMP = False
if __name__ == '__main__':
from brewtool.brewery import Brewery
Brewery.Run(
Config,
['build_android_prepare.py',
'build', '//third_party/google:protoc'])
else:
print('This script is not intended to be used as an imported module.')
sys.exit(1)
|
<commit_before><commit_msg>Add a simple script to help build android.<commit_after>
|
"""Configuration for the Caffe2 installation.
"""
from build import Config
import sys
Config.USE_SYSTEM_PROTOBUF = False
Config.PROTOC_BINARY = 'gen/third_party/google/protoc'
Config.USE_OPENMP = False
if __name__ == '__main__':
from brewtool.brewery import Brewery
Brewery.Run(
Config,
['build_android_prepare.py',
'build', '//third_party/google:protoc'])
else:
print('This script is not intended to be used as an imported module.')
sys.exit(1)
|
Add a simple script to help build android."""Configuration for the Caffe2 installation.
"""
from build import Config
import sys
Config.USE_SYSTEM_PROTOBUF = False
Config.PROTOC_BINARY = 'gen/third_party/google/protoc'
Config.USE_OPENMP = False
if __name__ == '__main__':
from brewtool.brewery import Brewery
Brewery.Run(
Config,
['build_android_prepare.py',
'build', '//third_party/google:protoc'])
else:
print('This script is not intended to be used as an imported module.')
sys.exit(1)
|
<commit_before><commit_msg>Add a simple script to help build android.<commit_after>"""Configuration for the Caffe2 installation.
"""
from build import Config
import sys
Config.USE_SYSTEM_PROTOBUF = False
Config.PROTOC_BINARY = 'gen/third_party/google/protoc'
Config.USE_OPENMP = False
if __name__ == '__main__':
from brewtool.brewery import Brewery
Brewery.Run(
Config,
['build_android_prepare.py',
'build', '//third_party/google:protoc'])
else:
print('This script is not intended to be used as an imported module.')
sys.exit(1)
|
|
153fe45973f2299f14d6c444f93a34ca82ea7aa4
|
tensorflow/examples/speech_commands/conv_only.py
|
tensorflow/examples/speech_commands/conv_only.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
import wave
import struct
import numpy as np
import sys
graphdef = tf.GraphDef()
from tensorflow.python.tools.optimize_for_inference import optimize_for_inference_lib
# Make new placeholders which will be inputs to the model
input = tf.placeholder(dtype=tf.float32, shape=(16000,1))
sample_rate = tf.placeholder(dtype=tf.int32, shape=())
graphdef.ParseFromString(open("conv_actions_frozen.pb",'rb').read())
# Load and remap unsupported ops (decode wav mostly)
labels, = tf.import_graph_def(graphdef, {"decoded_sample_data": input, "decoded_sample_data:1": sample_rate}, return_elements=["labels_softmax:0"],name="")
sess = tf.Session()
# Wrap sample_rate so its reported shape is (1,)
class DummyTensor:
def __init__(self, x):
self.name = x.name
self.dtype = x.dtype
def get_shape(self):
return (1,)
sample_rate= DummyTensor(sample_rate)
# optimize graph
def removeout(x): return x.split(":")[0]
curr = optimize_for_inference_lib.optimize_for_inference(sess.graph_def, [removeout(input.name), removeout(sample_rate.name)], [removeout(labels.name)], tf.float32.as_datatype_enum, True)
# Convert and write the model
data = tf.contrib.lite.toco_convert(curr, [input, sample_rate], [labels], allow_custom_ops=True)
open("conv.tflite","wb").write(data)
# make sure it runs
foo = tf.contrib.lite.Interpreter(model_path="conv.tflite")
foo.allocate_tensors()
print(foo.get_tensor(foo.get_input_details()[0]["index"]).shape)
foo.set_tensor(foo.get_input_details()[0]["index"], np.zeros((16000, 1), np.float32))
print(foo.get_tensor(foo.get_input_details()[1]["index"]).shape)
foo.set_tensor(foo.get_input_details()[1]["index"], np.array((44100,), np.float32))
foo.invoke()
|
Add conv to tflite script for conv_actions.
|
Add conv to tflite script for conv_actions.
|
Python
|
apache-2.0
|
aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow,aselle/tensorflow
|
Add conv to tflite script for conv_actions.
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
import wave
import struct
import numpy as np
import sys
graphdef = tf.GraphDef()
from tensorflow.python.tools.optimize_for_inference import optimize_for_inference_lib
# Make new placeholders which will be inputs to the model
input = tf.placeholder(dtype=tf.float32, shape=(16000,1))
sample_rate = tf.placeholder(dtype=tf.int32, shape=())
graphdef.ParseFromString(open("conv_actions_frozen.pb",'rb').read())
# Load and remap unsupported ops (decode wav mostly)
labels, = tf.import_graph_def(graphdef, {"decoded_sample_data": input, "decoded_sample_data:1": sample_rate}, return_elements=["labels_softmax:0"],name="")
sess = tf.Session()
# Wrap sample_rate so its reported shape is (1,)
class DummyTensor:
def __init__(self, x):
self.name = x.name
self.dtype = x.dtype
def get_shape(self):
return (1,)
sample_rate= DummyTensor(sample_rate)
# optimize graph
def removeout(x): return x.split(":")[0]
curr = optimize_for_inference_lib.optimize_for_inference(sess.graph_def, [removeout(input.name), removeout(sample_rate.name)], [removeout(labels.name)], tf.float32.as_datatype_enum, True)
# Convert and write the model
data = tf.contrib.lite.toco_convert(curr, [input, sample_rate], [labels], allow_custom_ops=True)
open("conv.tflite","wb").write(data)
# make sure it runs
foo = tf.contrib.lite.Interpreter(model_path="conv.tflite")
foo.allocate_tensors()
print(foo.get_tensor(foo.get_input_details()[0]["index"]).shape)
foo.set_tensor(foo.get_input_details()[0]["index"], np.zeros((16000, 1), np.float32))
print(foo.get_tensor(foo.get_input_details()[1]["index"]).shape)
foo.set_tensor(foo.get_input_details()[1]["index"], np.array((44100,), np.float32))
foo.invoke()
|
<commit_before><commit_msg>Add conv to tflite script for conv_actions.<commit_after>
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
import wave
import struct
import numpy as np
import sys
graphdef = tf.GraphDef()
from tensorflow.python.tools.optimize_for_inference import optimize_for_inference_lib
# Make new placeholders which will be inputs to the model
input = tf.placeholder(dtype=tf.float32, shape=(16000,1))
sample_rate = tf.placeholder(dtype=tf.int32, shape=())
graphdef.ParseFromString(open("conv_actions_frozen.pb",'rb').read())
# Load and remap unsupported ops (decode wav mostly)
labels, = tf.import_graph_def(graphdef, {"decoded_sample_data": input, "decoded_sample_data:1": sample_rate}, return_elements=["labels_softmax:0"],name="")
sess = tf.Session()
# Wrap sample_rate so its reported shape is (1,)
class DummyTensor:
def __init__(self, x):
self.name = x.name
self.dtype = x.dtype
def get_shape(self):
return (1,)
sample_rate= DummyTensor(sample_rate)
# optimize graph
def removeout(x): return x.split(":")[0]
curr = optimize_for_inference_lib.optimize_for_inference(sess.graph_def, [removeout(input.name), removeout(sample_rate.name)], [removeout(labels.name)], tf.float32.as_datatype_enum, True)
# Convert and write the model
data = tf.contrib.lite.toco_convert(curr, [input, sample_rate], [labels], allow_custom_ops=True)
open("conv.tflite","wb").write(data)
# make sure it runs
foo = tf.contrib.lite.Interpreter(model_path="conv.tflite")
foo.allocate_tensors()
print(foo.get_tensor(foo.get_input_details()[0]["index"]).shape)
foo.set_tensor(foo.get_input_details()[0]["index"], np.zeros((16000, 1), np.float32))
print(foo.get_tensor(foo.get_input_details()[1]["index"]).shape)
foo.set_tensor(foo.get_input_details()[1]["index"], np.array((44100,), np.float32))
foo.invoke()
|
Add conv to tflite script for conv_actions.# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
import wave
import struct
import numpy as np
import sys
graphdef = tf.GraphDef()
from tensorflow.python.tools.optimize_for_inference import optimize_for_inference_lib
# Make new placeholders which will be inputs to the model
input = tf.placeholder(dtype=tf.float32, shape=(16000,1))
sample_rate = tf.placeholder(dtype=tf.int32, shape=())
graphdef.ParseFromString(open("conv_actions_frozen.pb",'rb').read())
# Load and remap unsupported ops (decode wav mostly)
labels, = tf.import_graph_def(graphdef, {"decoded_sample_data": input, "decoded_sample_data:1": sample_rate}, return_elements=["labels_softmax:0"],name="")
sess = tf.Session()
# Wrap sample_rate so its reported shape is (1,)
class DummyTensor:
def __init__(self, x):
self.name = x.name
self.dtype = x.dtype
def get_shape(self):
return (1,)
sample_rate= DummyTensor(sample_rate)
# optimize graph
def removeout(x): return x.split(":")[0]
curr = optimize_for_inference_lib.optimize_for_inference(sess.graph_def, [removeout(input.name), removeout(sample_rate.name)], [removeout(labels.name)], tf.float32.as_datatype_enum, True)
# Convert and write the model
data = tf.contrib.lite.toco_convert(curr, [input, sample_rate], [labels], allow_custom_ops=True)
open("conv.tflite","wb").write(data)
# make sure it runs
foo = tf.contrib.lite.Interpreter(model_path="conv.tflite")
foo.allocate_tensors()
print(foo.get_tensor(foo.get_input_details()[0]["index"]).shape)
foo.set_tensor(foo.get_input_details()[0]["index"], np.zeros((16000, 1), np.float32))
print(foo.get_tensor(foo.get_input_details()[1]["index"]).shape)
foo.set_tensor(foo.get_input_details()[1]["index"], np.array((44100,), np.float32))
foo.invoke()
|
<commit_before><commit_msg>Add conv to tflite script for conv_actions.<commit_after># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tensorflow as tf
import wave
import struct
import numpy as np
import sys
from tensorflow.python.tools.optimize_for_inference import optimize_for_inference_lib
graphdef = tf.GraphDef()
# Make new placeholders which will be inputs to the model
input = tf.placeholder(dtype=tf.float32, shape=(16000,1))
sample_rate = tf.placeholder(dtype=tf.int32, shape=())
graphdef.ParseFromString(open("conv_actions_frozen.pb",'rb').read())
# Load and remap unsupported ops (decode wav mostly)
labels, = tf.import_graph_def(graphdef, {"decoded_sample_data": input, "decoded_sample_data:1": sample_rate}, return_elements=["labels_softmax:0"],name="")
sess = tf.Session()
# Wrap sample_rate so its reported shape is (1,) for optimize_for_inference
class DummyTensor:
def __init__(self, x):
self.name = x.name
self.dtype = x.dtype
def get_shape(self):
return (1,)
sample_rate= DummyTensor(sample_rate)
# optimize graph
def removeout(x): return x.split(":")[0]
curr = optimize_for_inference_lib.optimize_for_inference(sess.graph_def, [removeout(input.name), removeout(sample_rate.name)], [removeout(labels.name)], tf.float32.as_datatype_enum, True)
# Convert and write the model
data = tf.contrib.lite.toco_convert(curr, [input, sample_rate], [labels], allow_custom_ops=True)
open("conv.tflite","wb").write(data)
# make sure it runs
foo = tf.contrib.lite.Interpreter(model_path="conv.tflite")
foo.allocate_tensors()
print(foo.get_tensor(foo.get_input_details()[0]["index"]).shape)
foo.set_tensor(foo.get_input_details()[0]["index"], np.zeros((16000, 1), np.float32))
print(foo.get_tensor(foo.get_input_details()[1]["index"]).shape)
foo.set_tensor(foo.get_input_details()[1]["index"], np.array((44100,), np.int32))
foo.invoke()
|
|
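The record above converts the frozen speech-commands graph through the legacy tf.contrib.lite.toco_convert entry point from TensorFlow 1.x contrib. Below is a rough sketch of the same conversion through the later tf.lite.TFLiteConverter.from_session API; the frozen-graph filename, tensor names, and shapes are taken from the script above, while the TensorFlow version (1.13+, where tf.lite.TFLiteConverter exists) and the rest of the wiring are assumptions for illustration, not part of the original commit.
# Sketch only: assumes TensorFlow 1.13+, where tf.lite.TFLiteConverter is available.
# Tensor names and shapes mirror the script above; this is not the original commit.
import tensorflow as tf
graph_def = tf.GraphDef()
with open("conv_actions_frozen.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
graph = tf.Graph()
with graph.as_default():
    # Re-create the two inputs and splice them in place of the DecodeWav outputs.
    audio = tf.placeholder(tf.float32, shape=(16000, 1), name="decoded_sample_data")
    rate = tf.placeholder(tf.int32, shape=(), name="sample_rate")
    labels, = tf.import_graph_def(
        graph_def,
        {"decoded_sample_data": audio, "decoded_sample_data:1": rate},
        return_elements=["labels_softmax:0"],
        name="")
with tf.Session(graph=graph) as sess:
    converter = tf.lite.TFLiteConverter.from_session(sess, [audio, rate], [labels])
    converter.allow_custom_ops = True  # MFCC/AudioSpectrogram ops are not built in
    # Depending on the TF version, the scalar sample-rate input may still need the
    # same (1,) shape workaround used in the original script.
    with open("conv.tflite", "wb") as out:
        out.write(converter.convert())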
86241da03ecab6b540944004b8bcbb4f3c577bdc
|
numba/cuda/tests/cudapy/test_dispatcher.py
|
numba/cuda/tests/cudapy/test_dispatcher.py
|
from numba import cuda, float32, int32
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize((float32[::1],))
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f)
def test_no_double_specialize_no_sig(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize((float32[::1],))
self._test_no_double_specialize(f_specialized)
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize((int32[::1],))
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
if __name__ == '__main__':
unittest.main()
|
Add Dispatcher specialization cache tests
|
CUDA: Add Dispatcher specialization cache tests
|
Python
|
bsd-2-clause
|
stonebig/numba,stuartarchibald/numba,seibert/numba,seibert/numba,IntelLabs/numba,sklam/numba,gmarkall/numba,gmarkall/numba,cpcloud/numba,sklam/numba,stonebig/numba,numba/numba,IntelLabs/numba,gmarkall/numba,stuartarchibald/numba,numba/numba,seibert/numba,seibert/numba,stuartarchibald/numba,stonebig/numba,gmarkall/numba,numba/numba,IntelLabs/numba,IntelLabs/numba,cpcloud/numba,sklam/numba,seibert/numba,stuartarchibald/numba,cpcloud/numba,sklam/numba,sklam/numba,numba/numba,numba/numba,cpcloud/numba,stonebig/numba,gmarkall/numba,stonebig/numba,stuartarchibald/numba,cpcloud/numba,IntelLabs/numba
|
CUDA: Add Dispatcher specialization cache tests
|
from numba import cuda, float32, int32
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize((float32[::1],))
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f)
def test_no_double_specialize_no_sig(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize((float32[::1],))
self._test_no_double_specialize(f_specialized)
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize((int32[::1],))
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>CUDA: Add Dispatcher specialization cache tests<commit_after>
|
from numba import cuda, float32, int32
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize((float32[::1],))
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f)
def test_no_double_specialize_no_sig(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize((float32[::1],))
self._test_no_double_specialize(f_specialized)
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize((int32[::1],))
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
if __name__ == '__main__':
unittest.main()
|
CUDA: Add Dispatcher specialization cache tests
from numba import cuda, float32, int32
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize((float32[::1],))
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f)
def test_no_double_specialize_no_sig(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize((float32[::1],))
self._test_no_double_specialize(f_specialized)
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize((int32[::1],))
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>CUDA: Add Dispatcher specialization cache tests<commit_after>from numba import cuda, float32, int32
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize((float32[::1],))
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f)
def test_no_double_specialize_no_sig(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize((float32[::1],))
self._test_no_double_specialize(f_specialized)
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize((float32[::1],))
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize((int32[::1],))
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
if __name__ == '__main__':
unittest.main()
|
|
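The tests above only exercise the specialization cache itself. As an illustrative sketch, not part of the commit and requiring a real CUDA-capable GPU, a dispatcher returned by specialize() is launched like any other kernel, with compilation for that signature done eagerly rather than at first call; the kernel body, array size, and launch configuration below are arbitrary choices for the example.
# Sketch only: needs a CUDA GPU; kernel, sizes, and launch config are illustrative.
import numpy as np
from numba import cuda, float32
@cuda.jit
def double(x):
    i = cuda.grid(1)
    if i < x.size:
        x[i] *= 2.0
# Compile eagerly for 1-D contiguous float32 arrays, as in the tests above.
double_f32 = double.specialize((float32[::1],))
data = cuda.to_device(np.arange(16, dtype=np.float32))
double_f32[1, 32](data)        # launch the specialized kernel directly
print(data.copy_to_host())     # [0. 2. 4. ...]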
277ec688d7f92c415446e700db42386620d9b418
|
satnogsclient/settings.py
|
satnogsclient/settings.py
|
from os import environ
DEMODULATION_COMMAND = environ.get('DEMODULATION_COMMAND', None)
ENCODING_COMMAND = environ.get('ENCODING_COMMAND', None)
DECODING_COMMAND = environ.get('DECODING_COMMAND', None)
|
Add configuration file for client
|
Add configuration file for client
|
Python
|
agpl-3.0
|
adamkalis/satnogs-client,cshields/satnogs-client,adamkalis/satnogs-client,cshields/satnogs-client
|
Add configuration file for client
|
from os import environ
DEMODULATION_COMMAND = environ.get('DEMODULATION_COMMAND', None)
ENCODING_COMMAND = environ.get('ENCODING_COMMAND', None)
DECODING_COMMAND = environ.get('DECODING_COMMAND', None)
|
<commit_before><commit_msg>Add configuration file for client<commit_after>
|
from os import environ
DEMODULATION_COMMAND = environ.get('DEMODULATION_COMMAND', None)
ENCODING_COMMAND = environ.get('ENCODING_COMMAND', None)
DECODING_COMMAND = environ.get('DECODING_COMMAND', None)
|
Add configuration file for client
from os import environ
DEMODULATION_COMMAND = environ.get('DEMODULATION_COMMAND', None)
ENCODING_COMMAND = environ.get('ENCODING_COMMAND', None)
DECODING_COMMAND = environ.get('DECODING_COMMAND', None)
|
<commit_before><commit_msg>Add configuration file for client<commit_after>from os import environ
DEMODULATION_COMMAND = environ.get('DEMODULATION_COMMAND', None)
ENCODING_COMMAND = environ.get('ENCODING_COMMAND', None)
DECODING_COMMAND = environ.get('DECODING_COMMAND', None)
|
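The settings module above only reads command strings from the environment. A hypothetical consumer might look like the sketch below; run_demodulation, its arguments, and the use of shlex/subprocess are assumptions about how such command strings could be invoked, not code from the satnogs-client repository.
# Hypothetical usage sketch, not part of satnogs-client; assumes DEMODULATION_COMMAND
# holds an executable command line, e.g. "rtl_fm -f 145800000 -s 48000".
import shlex
import subprocess
from satnogsclient import settings
def run_demodulation(input_path, output_path):
    if settings.DEMODULATION_COMMAND is None:
        raise RuntimeError("DEMODULATION_COMMAND is not configured")
    cmd = shlex.split(settings.DEMODULATION_COMMAND) + [input_path, output_path]
    subprocess.check_call(cmd)
# export DEMODULATION_COMMAND="..." before starting the client, then for example:
# run_demodulation("observation.wav", "observation.demod")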