Dataset columns:
commit: stringlengths 40–40
old_file: stringlengths 4–118
new_file: stringlengths 4–118
old_contents: stringlengths 0–2.94k
new_contents: stringlengths 1–4.43k
subject: stringlengths 15–444
message: stringlengths 16–3.45k
lang: stringclasses 1 value
license: stringclasses 13 values
repos: stringlengths 5–43.2k
prompt: stringlengths 17–4.58k
response: stringlengths 1–4.43k
prompt_tagged: stringlengths 58–4.62k
response_tagged: stringlengths 1–4.43k
text: stringlengths 132–7.29k
text_tagged: stringlengths 173–7.33k
b72a4bb06fda18ebca91649808cd2f2c531b392e
|
migrations/versions/0060.py
|
migrations/versions/0060.py
|
"""empty message
Revision ID: 0060 set all show_banner_text
Revises: 0059 add show_banner_text
Create Date: 2021-10-03 00:31:22.285217
"""
# revision identifiers, used by Alembic.
revision = '0060 set all show_banner_text'
down_revision = '0059 add show_banner_text'
from alembic import op
def upgrade():
op.execute("UPDATE events SET show_banner_text = True")
def downgrade():
pass
|
Set all events to show banner text
|
Set all events to show banner text
|
Python
|
mit
|
NewAcropolis/api,NewAcropolis/api,NewAcropolis/api
|
|
79637efbdda03cea88fa6a59b24a27f1d393c79f
|
corehq/util/tests/test_es_interface.py
|
corehq/util/tests/test_es_interface.py
|
from django.test import SimpleTestCase
from mock import ANY, patch
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import SerializationError, get_es_new
from corehq.util.es.interface import ElasticsearchInterface
@es_test
class TestESInterface(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.es = get_es_new()
def _validate_es_scan_search_params(self, scan_query, search_query):
"""Call ElasticsearchInterface.scan() and test that the resulting API
search parameters match what we expect.
Notably:
- Search call does not include the `search_type='scan'`.
- Calling `scan(..., query=scan_query, ...)` results in an API call
where `body == search_query`.
"""
interface = ElasticsearchInterface(self.es)
skw = {
"index": "et",
"doc_type": "al",
"request_timeout": ANY,
"scroll": ANY,
"size": ANY,
}
with patch.object(self.es, "search") as search:
try:
list(interface.scan(skw["index"], scan_query, skw["doc_type"]))
except SerializationError:
# fails to serialize the Mock object.
pass
search.assert_called_once_with(body=search_query, **skw)
def test_scan_no_searchtype_scan(self):
"""Tests that search_type='scan' is not added to the search parameters"""
self._validate_es_scan_search_params({}, {"sort": "_doc"})
def test_scan_query_extended(self):
"""Tests that sort=_doc is added to an non-empty query"""
self._validate_es_scan_search_params({"_id": "abc"},
{"_id": "abc", "sort": "_doc"})
def test_scan_query_sort_safe(self):
"""Tests that a provided a `sort` query will not be overwritten"""
self._validate_es_scan_search_params({"sort": "_id"}, {"sort": "_id"})
|
Add tests for previous commit
|
Add tests for previous commit
inb4 this is backwards
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
|
fd3eaa3810ce82db864b3fcafe61d16ab53d85e5
|
perftest/scripts/webserver.py
|
perftest/scripts/webserver.py
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class Handler(BaseHTTPRequestHandler):
def do(self):
self.send_response(200)
self.wfile.write('{"headers":{"type":"type"},"content":{"b":2}}')
def do_GET(self):
self.do()
def do_POST(self):
self.do()
def main():
try:
server = HTTPServer(('', 8080), Handler)
print 'started httpserver...'
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down server'
server.socket.close()
if __name__ == '__main__':
main()
|
Add simple Python web server for performance testing
|
perftest: Add simple Python web server for performance testing
In order to profile bottlenecks in CCF's HTTP communication, a simple
Python web server comes in very handy. One can conclude that the existing
HTTP server in CCF does not meet the performance requirements if the
perftest client is clearly faster against the Python web server.
The web server will respond with the same response regardless of the
request path or request type, and can be started up with:
> python webserver.py
Signed-off-by: Harri Salokorpi <eefb23b8deb8c30dca252cd0ae751b67337dc69f@reaktor.fi>
Signed-off-by: Karim Osman <94396faf6cc817be76165477038b3cfa832e89a4@reaktor.fi>
|
Python
|
apache-2.0
|
akisaarinen/ccf,akisaarinen/ccf
|
|
72be8a8fd8345542096ba31e3f1428ea25ea9498
|
ex6.py
|
ex6.py
|
end1 = "C"
end2 = "H"
end3 = "E"
end4 = "E"
end5 = "S"
end6 = "E"
end7 = "B"
end8 = "U"
end9 = "R"
end10 = "G"
end11 = "E"
end12 = "R"
# Printing without a comma
print end1 + end2 + end3 + end4 + end5 + end6
print end7 + end8 + end9 + end10 + end11 + end12
# Printing with a comma
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
|
Print with vs without a comma
|
Print with vs without a comma
|
Python
|
mit
|
nguyennam9696/Learn_Python_The_Hard_Way
|
|
96035f6bb2a298cea859b1e5e9812e2dd83982d2
|
dnanexus/shell/resources/home/dnanexus/upload_file.py
|
dnanexus/shell/resources/home/dnanexus/upload_file.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import os, sys, time, subprocess, json, requests
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json',
}
path = 'test.fastq'
FILE_URL = 'http://test.encodedcc.org/TSTFF867178/upload/'
ENCODED_KEY = '...'
ENCODED_SECRET_KEY = '...'
response = requests.get(FILE_URL, headers=HEADERS, auth=(ENCODED_KEY, ENCODED_SECRET_KEY))
try:
response.raise_for_status()
except:
print('File object GET failed')
raise
item = response.json()['@graph'][0]
print(json.dumps(item, indent=4, sort_keys=True))
creds = item['upload_credentials']
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
# ~10s/GB from Stanford - AWS Oregon
# ~12-15s/GB from AWS Ireland - AWS Oregon
print("Uploading file.")
start = time.time()
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
print("Upload failed with exit code %d" % e.returncode)
sys.exit(e.returncode)
else:
end = time.time()
duration = end - start
print("Uploaded in %.2f seconds" % duration)
|
Add script to upload files to shell applet
|
Add script to upload files to shell applet
|
Python
|
mit
|
ENCODE-DCC/chip-seq-pipeline,ENCODE-DCC/chip-seq-pipeline,ENCODE-DCC/chip-seq-pipeline,ENCODE-DCC/chip-seq-pipeline
|
|
85f6b2437b57c6e33ff56422b15aaab690704218
|
ckanext/doi/tests/test_schema.py
|
ckanext/doi/tests/test_schema.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-doi
# Created by the Natural History Museum in London, UK
import ckanext.doi.api as doi_api
import ckanext.doi.lib as doi_lib
import mock
import requests
from ckantest.factories import DataConstants
from ckantest.models import TestBase
from lxml import etree
class Resolver(etree.Resolver):
def resolve(self, url, pubid, context):
r = requests.get(url)
return self.resolve_string(r.content, context)
class TestSchema(TestBase):
plugins = [u'doi']
base_url = u'https://schema.datacite.org/meta/kernel-3/'
@classmethod
def setup_class(cls):
super(TestSchema, cls).setup_class()
r = requests.get(cls.base_url + 'metadata.xsd')
parser = etree.XMLParser(no_network=False)
parser.resolvers.add(Resolver())
xml_etree = etree.fromstring(r.content,
base_url=cls.base_url,
parser=parser)
cls.schema = etree.XMLSchema(xml_etree)
with mock.patch('ckan.lib.helpers.session', cls._session):
cls.package_dict = cls.data_factory().package(author=DataConstants.authors_short,
activate=False)
def test_validate_schema(self):
doi = doi_lib.get_or_create_doi(self.package_dict[u'id'])
metadata_dict = doi_lib.build_metadata(self.package_dict, doi)
api = doi_api.MetadataDataCiteAPI()
xml_string = api.metadata_to_xml(**metadata_dict)
xml_tree = etree.fromstring(xml_string)
self.schema.assertValid(xml_tree)
|
Add test to validate against schema
|
Add test to validate against schema
|
Python
|
mit
|
NaturalHistoryMuseum/ckanext-doi,NaturalHistoryMuseum/ckanext-doi,NaturalHistoryMuseum/ckanext-doi
|
|
a7629ef3acedaa688a455c01afb65c40a53c14b0
|
tests/test_draw.py
|
tests/test_draw.py
|
import batoid
import time
import os
import yaml
import numpy as np
import pytest
from test_helpers import timer
# Use matplotlib with a non-interactive backend.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
@timer
def initialize(ngrid=25, theta_x=1.):
DESI_fn = os.path.join(batoid.datadir, 'DESI', 'DESI.yaml')
config = yaml.safe_load(open(DESI_fn))
telescope = batoid.parse.parse_optic(config['opticalSystem'])
dirCos = batoid.utils.gnomicToDirCos(np.deg2rad(theta_x), 0.)
rays = batoid.rayGrid(
telescope.dist, telescope.pupilSize, dirCos[0], dirCos[1], -dirCos[2],
ngrid, 500e-9, 1.0, telescope.inMedium
)
return telescope, telescope.traceFull(rays)
@timer
def draw2dtelescope(ax, telescope):
telescope.draw2d(ax, c='k')
@timer
def draw2drays(ax, rays, start=None, stop=None):
batoid.drawTrace2d(ax, rays, start, stop, c='b', lw=1)
def test_draw2d(ngrid=25):
telescope, rays = initialize(ngrid)
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
draw2dtelescope(ax, telescope)
draw2drays(ax, rays)
@timer
def draw3dtelescope(ax, telescope):
telescope.draw3d(ax, c='k')
@timer
def draw3drays(ax, rays, start=None, stop=None):
batoid.drawTrace3d(ax, rays, start, stop, c='b', lw=1)
def test_draw3d(ngrid=25):
telescope, rays = initialize(ngrid)
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111, projection='3d')
draw3dtelescope(ax, telescope)
draw3drays(ax, rays)
if __name__ == '__main__':
test_draw3d()
plt.savefig('draw3d.png')
test_draw2d()
plt.savefig('draw2d.png')
|
Add unit test for drawing routines
|
Add unit test for drawing routines
|
Python
|
bsd-2-clause
|
jmeyers314/jtrace,jmeyers314/batoid,jmeyers314/jtrace,jmeyers314/batoid,jmeyers314/jtrace
|
|
41f0533edc9ebe788722711af95e040d4f06abb9
|
lglass/bird.py
|
lglass/bird.py
|
# coding: utf-8
import subprocess
import netaddr
import lglass.route
class BirdClient(object):
def __init__(self, executable="birdc"):
self.executable = executable
def send(self, command, raw=False):
argv = [self.executable]
if raw:
argv.append("-v")
if isinstance(command, str):
argv.extend(command.split())
else:
argv.extend(command)
p = subprocess.Popen(argv,
stdout=subprocess.PIPE, stdin=subprocess.DEVNULL, stderr=subprocess.PIPE)
data = b""
while True:
rdata = p.stdout.read()
if len(rdata) == 0:
break
data += rdata
p.wait()
return data.split(b"\n", 1)[1]
def routes(self, table=None, protocol=None, primary=False, all=True, filtered=False):
command = ["show", "route"]
if table is not None:
command.append("table")
command.append(str(table))
if all:
command.append("all")
if primary:
command.append("primary")
if filtered:
command.append("filtered")
if protocol is not None:
command.append(str(protocol))
res = self.send(command)
return list(parse_routes(res.decode().splitlines()))
def protocols(self):
command = ["show", "protocols"]
res = self.send(command)
for line in res.splitlines()[1:]:
t = line.decode().split()
while len(t) < 7:
t.append(None)
yield tuple(t)
def parse_routes(lines):
lines_iter = iter(lines)
cur_prefix = None
cur_route = None
for line in lines_iter:
if line[0] == "\t":
# route annotation
key, value = line.split(":", 1)
cur_route[key.strip()] = value.strip()
continue
if cur_route is not None:
yield cur_route
if line[0] != " ":
cur_prefix, *args = line.split()
else:
args = line.split()
cur_route = lglass.route.Route(cur_prefix)
if args[0] == "via":
cur_route.nexthop = (netaddr.IPAddress(args[1]), args[3])
if args[-2][0] == "(" and args[-2][-1] == ")":
metric = args[-2][1:-1]
if "/" in metric:
metric = metric.split("/", 1)[0]
cur_route.metric = int(metric)
if cur_route is not None:
yield cur_route
|
Add simple BIRD client class
|
Add simple BIRD client class
|
Python
|
mit
|
fritz0705/lglass
|
|
064386acbe509f872e40f3f577e7b6189ed91434
|
src/test/ed/lang/python/thread2_test.py
|
src/test/ed/lang/python/thread2_test.py
|
import _10gen
import ed.appserver.AppContext
import ed.lang.python.Python
import java.io.File
# FIXME: this test produces a lot of output
_10gen.__instance__ = ed.lang.python.Python.toPython(ed.appserver.AppContext(java.io.File('.')))
import test.test_thread
import test.test_threading
|
Test threading in app server rather than in serverTest.
|
Test threading in app server rather than in serverTest.
|
Python
|
apache-2.0
|
babble/babble,babble/babble,babble/babble,babble/babble,babble/babble,babble/babble
|
|
3b5234a370db18fc51d8ad8573981c85544abb47
|
test/frameworks/lib/fix_auth_failure.py
|
test/frameworks/lib/fix_auth_failure.py
|
#!/usr/bin/env python
# $Id: fix_auth_failure.py,v 1.1 2011-02-15 20:28:38 barry409 Exp $
# Copyright (c) 2011 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
import sys
import urllib2
def fix_auth_failure():
"""Fix the python 2.6.6 auth failure bug."""
if sys.version_info[:2] == (2, 6) and sys.version_info[2] >= 6:
def fixed_http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
self.retried = 0
return response
urllib2.HTTPBasicAuthHandler.http_error_401 = fixed_http_error_401
|
Add code to fix the python 2.6.6 auth failure issue.
|
Add code to fix the python 2.6.6 auth failure issue.
git-svn-id: 293778eaa97c8c94097d610b1bd5133a8f478f36@10708 4f837ed2-42f5-46e7-a7a5-fa17313484d4
|
Python
|
bsd-3-clause
|
edina/lockss-daemon,lockss/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,lockss/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon
|
Add code to fix the python 2.6.6 auth failure issue.
git-svn-id: 293778eaa97c8c94097d610b1bd5133a8f478f36@10708 4f837ed2-42f5-46e7-a7a5-fa17313484d4
|
#!/usr/bin/env python
# $Id: fix_auth_failure.py,v 1.1 2011-02-15 20:28:38 barry409 Exp $
# Copyright (c) 2011 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
import sys
import urllib2
def fix_auth_failure():
"""Fix the python 2.6.6 auth failure bug."""
if sys.version_info[:2] == (2, 6) and sys.version_info[2] >= 6:
def fixed_http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
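            # resetting the handler's retry counter is the actual fix: without it,
            # a later 401 raises immediately instead of being retried (2.6.6 regression)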
self.retried = 0
return response
urllib2.HTTPBasicAuthHandler.http_error_401 = fixed_http_error_401
|
<commit_before><commit_msg>Add code to fix the python 2.6.6 auth failure issue.
git-svn-id: 293778eaa97c8c94097d610b1bd5133a8f478f36@10708 4f837ed2-42f5-46e7-a7a5-fa17313484d4<commit_after>
|
#!/usr/bin/env python
# $Id: fix_auth_failure.py,v 1.1 2011-02-15 20:28:38 barry409 Exp $
# Copyright (c) 2011 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
import sys
import urllib2
def fix_auth_failure():
"""Fix the python 2.6.6 auth failure bug."""
if sys.version_info[:2] == (2, 6) and sys.version_info[2] >= 6:
def fixed_http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
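            # resetting the handler's retry counter is the actual fix: without it,
            # a later 401 raises immediately instead of being retried (2.6.6 regression)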
self.retried = 0
return response
urllib2.HTTPBasicAuthHandler.http_error_401 = fixed_http_error_401
|
Add code to fix the python 2.6.6 auth failure issue.
git-svn-id: 293778eaa97c8c94097d610b1bd5133a8f478f36@10708 4f837ed2-42f5-46e7-a7a5-fa17313484d4#!/usr/bin/env python
# $Id: fix_auth_failure.py,v 1.1 2011-02-15 20:28:38 barry409 Exp $
# Copyright (c) 2011 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
import sys
import urllib2
def fix_auth_failure():
"""Fix the python 2.6.6 auth failure bug."""
if sys.version_info[:2] == (2, 6) and sys.version_info[2] >= 6:
def fixed_http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
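            # resetting the handler's retry counter is the actual fix: without it,
            # a later 401 raises immediately instead of being retried (2.6.6 regression)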
self.retried = 0
return response
urllib2.HTTPBasicAuthHandler.http_error_401 = fixed_http_error_401
|
<commit_before><commit_msg>Add code to fix the python 2.6.6 auth failure issue.
git-svn-id: 293778eaa97c8c94097d610b1bd5133a8f478f36@10708 4f837ed2-42f5-46e7-a7a5-fa17313484d4<commit_after>#!/usr/bin/env python
# $Id: fix_auth_failure.py,v 1.1 2011-02-15 20:28:38 barry409 Exp $
# Copyright (c) 2011 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
import sys
import urllib2
def fix_auth_failure():
"""Fix the python 2.6.6 auth failure bug."""
if sys.version_info[:2] == (2, 6) and sys.version_info[2] >= 6:
def fixed_http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
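            # resetting the handler's retry counter is the actual fix: without it,
            # a later 401 raises immediately instead of being retried (2.6.6 regression)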
self.retried = 0
return response
urllib2.HTTPBasicAuthHandler.http_error_401 = fixed_http_error_401
|
|
a8bd9defcf3359296acf7633041b036213868075
|
install.py
|
install.py
|
#!/usr/bin/env python
import subprocess
def sudo(command_text):
parts = ['sudo']
    parts.extend(command_text.split())  # split on whitespace, not on the string itself
subprocess.call(parts)
def apt_get_install(package_name):
command_text = "apt-get -y install {0}".format(package_name)
sudo(command_text)
def main():
# Install system dependencies
sudo("apt-get update")
sudo("apt-get -y upgrade")
apt_get_install("upstart")
apt_get_install("python-dev")
apt_get_install("python-pip")
# Setup the virtualenv
subprocess.call(["pip", "install", "virtualenv"])
subprocess.call(["virtualenv", "env", "--no-site-packages"])
subprocess.call(["source", "./env/bin/activate"])
subprocess.call(["pip", "install", "-r", "requirements.txt"])
# Make default images folder
subprocess.call(["mkdir", "/home/pi/images"])
# Copy Upstart scripts
subprocess.call(["cp", "upstart/dropbox-worker.conf", "/etc/init"])
subprocess.call(["cp", "upstart/time-lapse.conf", "/etc/init"])
print("Installation complete!")
print("Please reboot your Raspberry Pi :)")
if __name__ == '__main__':
main()
|
Make getting started easier with a handy script
|
Make getting started easier with a handy script
|
Python
|
mit
|
projectweekend/Pi-Camera-Time-Lapse,projectweekend/Pi-Camera-Time-Lapse
|
Make getting started easier with a handy script
|
#!/usr/bin/env python
import subprocess
def sudo(command_text):
parts = ['sudo']
    parts.extend(command_text.split())  # split on whitespace, not on the string itself
subprocess.call(parts)
def apt_get_install(package_name):
command_text = "apt-get -y install {0}".format(package_name)
sudo(command_text)
def main():
# Install system dependencies
sudo("apt-get update")
sudo("apt-get -y upgrade")
apt_get_install("upstart")
apt_get_install("python-dev")
apt_get_install("python-pip")
# Setup the virtualenv
subprocess.call(["pip", "install", "virtualenv"])
subprocess.call(["virtualenv", "env", "--no-site-packages"])
subprocess.call(["source", "./env/bin/activate"])
subprocess.call(["pip", "install", "-r", "requirements.txt"])
# Make default images folder
subprocess.call(["mkdir", "/home/pi/images"])
# Copy Upstart scripts
subprocess.call(["cp", "upstart/dropbox-worker.conf", "/etc/init"])
subprocess.call(["cp", "upstart/time-lapse.conf", "/etc/init"])
print("Installation complete!")
print("Please reboot your Raspberry Pi :)")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Make getting started easier with a handy script<commit_after>
|
#!/usr/bin/env python
import subprocess
def sudo(command_text):
parts = ['sudo']
    parts.extend(command_text.split())  # split on whitespace, not on the string itself
subprocess.call(parts)
def apt_get_install(package_name):
command_text = "apt-get -y install {0}".format(package_name)
sudo(command_text)
def main():
# Install system dependencies
sudo("apt-get update")
sudo("apt-get -y upgrade")
apt_get_install("upstart")
apt_get_install("python-dev")
apt_get_install("python-pip")
# Setup the virtualenv
subprocess.call(["pip", "install", "virtualenv"])
subprocess.call(["virtualenv", "env", "--no-site-packages"])
subprocess.call(["source", "./env/bin/activate"])
subprocess.call(["pip", "install", "-r", "requirements.txt"])
# Make default images folder
subprocess.call(["mkdir", "/home/pi/images"])
# Copy Upstart scripts
subprocess.call(["cp", "upstart/dropbox-worker.conf", "/etc/init"])
subprocess.call(["cp", "upstart/time-lapse.conf", "/etc/init"])
print("Installation complete!")
print("Please reboot your Raspberry Pi :)")
if __name__ == '__main__':
main()
|
Make getting started easier with a handy script#!/usr/bin/env python
import subprocess
def sudo(command_text):
parts = ['sudo']
    parts.extend(command_text.split())  # split on whitespace, not on the string itself
subprocess.call(parts)
def apt_get_install(package_name):
command_text = "apt-get -y install {0}".format(package_name)
sudo(command_text)
def main():
# Install system dependencies
sudo("apt-get update")
sudo("apt-get -y upgrade")
apt_get_install("upstart")
apt_get_install("python-dev")
apt_get_install("python-pip")
# Setup the virtualenv
subprocess.call(["pip", "install", "virtualenv"])
subprocess.call(["virtualenv", "env", "--no-site-packages"])
subprocess.call(["source", "./env/bin/activate"])
subprocess.call(["pip", "install", "-r", "requirements.txt"])
# Make default images folder
subprocess.call(["mkdir", "/home/pi/images"])
# Copy Upstart scripts
subprocess.call(["cp", "upstart/dropbox-worker.conf", "/etc/init"])
subprocess.call(["cp", "upstart/time-lapse.conf", "/etc/init"])
print("Installation complete!")
print("Please reboot your Raspberry Pi :)")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Make getting started easier with a handy script<commit_after>#!/usr/bin/env python
import subprocess
def sudo(command_text):
parts = ['sudo']
    parts.extend(command_text.split())  # split on whitespace, not on the string itself
subprocess.call(parts)
def apt_get_install(package_name):
command_text = "apt-get -y install {0}".format(package_name)
sudo(command_text)
def main():
# Install system dependencies
sudo("apt-get update")
sudo("apt-get -y upgrade")
apt_get_install("upstart")
apt_get_install("python-dev")
apt_get_install("python-pip")
# Setup the virtualenv
subprocess.call(["pip", "install", "virtualenv"])
subprocess.call(["virtualenv", "env", "--no-site-packages"])
subprocess.call(["source", "./env/bin/activate"])
subprocess.call(["pip", "install", "-r", "requirements.txt"])
# Make default images folder
subprocess.call(["mkdir", "/home/pi/images"])
# Copy Upstart scripts
subprocess.call(["cp", "upstart/dropbox-worker.conf", "/etc/init"])
subprocess.call(["cp", "upstart/time-lapse.conf", "/etc/init"])
print("Installation complete!")
print("Please reboot your Raspberry Pi :)")
if __name__ == '__main__':
main()
|
|
6f3579e6ac32211779481307f8e508469dde7605
|
externalNMEA.py
|
externalNMEA.py
|
from __future__ import print_function
import requests
import argparse
import time
import logging
import sys
import serial
import pynmea2
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def set_position_master(url, latitude, longitude, orientation):
payload = dict(lat=latitude, lon=longitude, orientation=orientation)
r = requests.put(url, json=payload, timeout=10)
if r.status_code != 200:
log.error("Error setting position and orientation: {} {}".format(r.status_code, r.text))
def main():
parser = argparse.ArgumentParser(description="Push position and orientation of master to Underwater GPS")
parser.add_argument('-u', '--url', help='Base URL to use', type=str, default='http://37.139.8.112:8000')
parser.add_argument('-d', '--source', help='Device to read nmea strings from', type=str, default='/dev/ttyUSB0')
args = parser.parse_args()
baseurl = args.url
log.info("Using baseurl: %s source: %s", args.url, args.source)
    reader = pynmea2.NMEAStreamReader()  # correct class name in pynmea2
com = args.source
try:
com = serial.Serial(args.source, timeout=5.0)
except serial.SerialException:
log.warning('Could not connect to %s', args.source)
log.warning("Exiting")
sys.exit()
lat = 0
lon = 0
orientation = 0
gotUpdate = False
while True:
try:
data = com.read()
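            # read() returns at most one byte per call (pyserial's default size=1);
            # NMEAStreamReader buffers input until a complete sentence is parsed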
for msg in reader.next(data):
if type(msg) == pynmea2.types.talker.GGA:
lat = msg.latitude
lon = msg.longitude
gotUpdate = True
elif type(msg) == pynmea2.types.talker.HDT:
orientation = msg.heading
gotUpdate = True
except pynmea2.ParseError as e:
log.warning("Error while parsing NMEA string: {}".format(e))
if gotUpdate:
log.info('Sending position and orientation')
set_position_master('{}/api/v1/external/master'.format(baseurl), lat, lon, orientation)
gotUpdate = False
if __name__ == "__main__":
main()
|
Add example of how to read from a external GPS outputting NMEA messages.
|
Add example of how to read from a external GPS outputting NMEA messages.
Parsing using pynmea2
|
Python
|
mit
|
waterlinked/examples
|
Add example of how to read from a external GPS outputting NMEA messages.
Parsing using pynmea2
|
from __future__ import print_function
import requests
import argparse
import time
import logging
import sys
import serial
import pynmea2
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def set_position_master(url, latitude, longitude, orientation):
payload = dict(lat=latitude, lon=longitude, orientation=orientation)
r = requests.put(url, json=payload, timeout=10)
if r.status_code != 200:
log.error("Error setting position and orientation: {} {}".format(r.status_code, r.text))
def main():
parser = argparse.ArgumentParser(description="Push position and orientation of master to Underwater GPS")
parser.add_argument('-u', '--url', help='Base URL to use', type=str, default='http://37.139.8.112:8000')
parser.add_argument('-d', '--source', help='Device to read nmea strings from', type=str, default='/dev/ttyUSB0')
args = parser.parse_args()
baseurl = args.url
log.info("Using baseurl: %s source: %s", args.url, args.source)
    reader = pynmea2.NMEAStreamReader()  # correct class name in pynmea2
com = args.source
try:
com = serial.Serial(args.source, timeout=5.0)
except serial.SerialException:
log.warning('Could not connect to %s', args.source)
log.warning("Exiting")
sys.exit()
lat = 0
lon = 0
orientation = 0
gotUpdate = False
while True:
try:
data = com.read()
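            # read() returns at most one byte per call (pyserial's default size=1);
            # NMEAStreamReader buffers input until a complete sentence is parsed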
for msg in reader.next(data):
if type(msg) == pynmea2.types.talker.GGA:
lat = msg.latitude
lon = msg.longitude
gotUpdate = True
elif type(msg) == pynmea2.types.talker.HDT:
orientation = msg.heading
gotUpdate = True
except pynmea2.ParseError as e:
log.warning("Error while parsing NMEA string: {}".format(e))
if gotUpdate:
log.info('Sending position and orientation')
set_position_master('{}/api/v1/external/master'.format(baseurl), lat, lon, orientation)
gotUpdate = False
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add example of how to read from a external GPS outputting NMEA messages.
Parsing using pynmea2<commit_after>
|
from __future__ import print_function
import requests
import argparse
import time
import logging
import sys
import serial
import pynmea2
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def set_position_master(url, latitude, longitude, orientation):
payload = dict(lat=latitude, lon=longitude, orientation=orientation)
r = requests.put(url, json=payload, timeout=10)
if r.status_code != 200:
log.error("Error setting position and orientation: {} {}".format(r.status_code, r.text))
def main():
parser = argparse.ArgumentParser(description="Push position and orientation of master to Underwater GPS")
parser.add_argument('-u', '--url', help='Base URL to use', type=str, default='http://37.139.8.112:8000')
parser.add_argument('-d', '--source', help='Device to read nmea strings from', type=str, default='/dev/ttyUSB0')
args = parser.parse_args()
baseurl = args.url
log.info("Using baseurl: %s source: %s", args.url, args.source)
    reader = pynmea2.NMEAStreamReader()  # correct class name in pynmea2
com = args.source
try:
com = serial.Serial(args.source, timeout=5.0)
except serial.SerialException:
log.warning('Could not connect to %s', args.source)
log.warning("Exiting")
sys.exit()
lat = 0
lon = 0
orientation = 0
gotUpdate = False
while True:
try:
data = com.read()
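            # read() returns at most one byte per call (pyserial's default size=1);
            # NMEAStreamReader buffers input until a complete sentence is parsed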
for msg in reader.next(data):
if type(msg) == pynmea2.types.talker.GGA:
lat = msg.latitude
lon = msg.longitude
gotUpdate = True
elif type(msg) == pynmea2.types.talker.HDT:
orientation = msg.heading
gotUpdate = True
except pynmea2.ParseError as e:
log.warning("Error while parsing NMEA string: {}".format(e))
if gotUpdate:
log.info('Sending position and orientation')
set_position_master('{}/api/v1/external/master'.format(baseurl), lat, lon, orientation)
gotUpdate = False
if __name__ == "__main__":
main()
|
Add example of how to read from a external GPS outputting NMEA messages.
Parsing using pynmea2from __future__ import print_function
import requests
import argparse
import time
import logging
import sys
import serial
import pynmea2
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def set_position_master(url, latitude, longitude, orientation):
payload = dict(lat=latitude, lon=longitude, orientation=orientation)
r = requests.put(url, json=payload, timeout=10)
if r.status_code != 200:
log.error("Error setting position and orientation: {} {}".format(r.status_code, r.text))
def main():
parser = argparse.ArgumentParser(description="Push position and orientation of master to Underwater GPS")
parser.add_argument('-u', '--url', help='Base URL to use', type=str, default='http://37.139.8.112:8000')
parser.add_argument('-d', '--source', help='Device to read nmea strings from', type=str, default='/dev/ttyUSB0')
args = parser.parse_args()
baseurl = args.url
log.info("Using baseurl: %s source: %s", args.url, args.source)
    reader = pynmea2.NMEAStreamReader()  # correct class name in pynmea2
com = args.source
try:
com = serial.Serial(args.source, timeout=5.0)
except serial.SerialException:
log.warning('Could not connect to %s', args.source)
log.warning("Exiting")
sys.exit()
lat = 0
lon = 0
orientation = 0
gotUpdate = False
while True:
try:
data = com.read()
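            # read() returns at most one byte per call (pyserial's default size=1);
            # NMEAStreamReader buffers input until a complete sentence is parsed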
for msg in reader.next(data):
if type(msg) == pynmea2.types.talker.GGA:
lat = msg.latitude
lon = msg.longitude
gotUpdate = True
elif type(msg) == pynmea2.types.talker.HDT:
orientation = msg.heading
gotUpdate = True
except pynmea2.ParseError as e:
log.warning("Error while parsing NMEA string: {}".format(e))
if gotUpdate:
log.info('Sending position and orientation')
set_position_master('{}/api/v1/external/master'.format(baseurl), lat, lon, orientation)
gotUpdate = False
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add example of how to read from a external GPS outputting NMEA messages.
Parsing using pynmea2<commit_after>from __future__ import print_function
import requests
import argparse
import time
import logging
import sys
import serial
import pynmea2
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def set_position_master(url, latitude, longitude, orientation):
payload = dict(lat=latitude, lon=longitude, orientation=orientation)
r = requests.put(url, json=payload, timeout=10)
if r.status_code != 200:
log.error("Error setting position and orientation: {} {}".format(r.status_code, r.text))
def main():
parser = argparse.ArgumentParser(description="Push position and orientation of master to Underwater GPS")
parser.add_argument('-u', '--url', help='Base URL to use', type=str, default='http://37.139.8.112:8000')
parser.add_argument('-d', '--source', help='Device to read nmea strings from', type=str, default='/dev/ttyUSB0')
args = parser.parse_args()
baseurl = args.url
log.info("Using baseurl: %s source: %s", args.url, args.source)
    reader = pynmea2.NMEAStreamReader()  # correct class name in pynmea2
com = args.source
try:
com = serial.Serial(args.source, timeout=5.0)
except serial.SerialException:
log.warning('Could not connect to %s', args.source)
log.warning("Exiting")
sys.exit()
lat = 0
lon = 0
orientation = 0
gotUpdate = False
while True:
try:
data = com.read()
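            # read() returns at most one byte per call (pyserial's default size=1);
            # NMEAStreamReader buffers input until a complete sentence is parsed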
for msg in reader.next(data):
if type(msg) == pynmea2.types.talker.GGA:
lat = msg.latitude
lon = msg.longitude
gotUpdate = True
elif type(msg) == pynmea2.types.talker.HDT:
orientation = msg.heading
gotUpdate = True
except pynmea2.ParseError as e:
log.warning("Error while parsing NMEA string: {}".format(e))
if gotUpdate:
log.info('Sending position and orientation')
set_position_master('{}/api/v1/external/master'.format(baseurl), lat, lon, orientation)
gotUpdate = False
if __name__ == "__main__":
main()
|
|
ac0d0b78b7b4eef913460894fca3af1ace222c7f
|
tests/test_damage.py
|
tests/test_damage.py
|
import unittest
from damage import Damage
class DamageTests(unittest.TestCase):
def test_init(self):
dmg = Damage(phys_dmg=1.34, magic_dmg=1.49391)
expected_phys_dmg = 1.3
expected_m_dmg = 1.5
expected_absorbed = 0
# it should round the magic/phys dmg to 1 point after the decimal
self.assertEqual(dmg.phys_dmg, expected_phys_dmg)
self.assertEqual(dmg.magic_dmg, expected_m_dmg)
self.assertEqual(dmg.phys_absorbed, expected_absorbed)
self.assertEqual(dmg.magic_absorbed, expected_absorbed)
if __name__ == '__main__':
unittest.main()
|
Test template for the Damage class
|
Test template for the Damage class
|
Python
|
mit
|
Enether/python_wow
|
Test template for the Damage class
|
import unittest
from damage import Damage
class DamageTests(unittest.TestCase):
def test_init(self):
dmg = Damage(phys_dmg=1.34, magic_dmg=1.49391)
expected_phys_dmg = 1.3
expected_m_dmg = 1.5
expected_absorbed = 0
# it should round the magic/phys dmg to 1 point after the decimal
self.assertEqual(dmg.phys_dmg, expected_phys_dmg)
self.assertEqual(dmg.magic_dmg, expected_m_dmg)
self.assertEqual(dmg.phys_absorbed, expected_absorbed)
self.assertEqual(dmg.magic_absorbed, expected_absorbed)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test template for the Damage class<commit_after>
|
import unittest
from damage import Damage
class DamageTests(unittest.TestCase):
def test_init(self):
dmg = Damage(phys_dmg=1.34, magic_dmg=1.49391)
expected_phys_dmg = 1.3
expected_m_dmg = 1.5
expected_absorbed = 0
# it should round the magic/phys dmg to 1 point after the decimal
self.assertEqual(dmg.phys_dmg, expected_phys_dmg)
self.assertEqual(dmg.magic_dmg, expected_m_dmg)
self.assertEqual(dmg.phys_absorbed, expected_absorbed)
self.assertEqual(dmg.magic_absorbed, expected_absorbed)
if __name__ == '__main__':
unittest.main()
|
Test template for the Damage classimport unittest
from damage import Damage
class DamageTests(unittest.TestCase):
def test_init(self):
dmg = Damage(phys_dmg=1.34, magic_dmg=1.49391)
expected_phys_dmg = 1.3
expected_m_dmg = 1.5
expected_absorbed = 0
# it should round the magic/phys dmg to 1 point after the decimal
self.assertEqual(dmg.phys_dmg, expected_phys_dmg)
self.assertEqual(dmg.magic_dmg, expected_m_dmg)
self.assertEqual(dmg.phys_absorbed, expected_absorbed)
self.assertEqual(dmg.magic_absorbed, expected_absorbed)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test template for the Damage class<commit_after>import unittest
from damage import Damage
class DamageTests(unittest.TestCase):
def test_init(self):
dmg = Damage(phys_dmg=1.34, magic_dmg=1.49391)
expected_phys_dmg = 1.3
expected_m_dmg = 1.5
expected_absorbed = 0
# it should round the magic/phys dmg to 1 point after the decimal
self.assertEqual(dmg.phys_dmg, expected_phys_dmg)
self.assertEqual(dmg.magic_dmg, expected_m_dmg)
self.assertEqual(dmg.phys_absorbed, expected_absorbed)
self.assertEqual(dmg.magic_absorbed, expected_absorbed)
if __name__ == '__main__':
unittest.main()
|
|
9349adb2efa5f0242cf9250d74d714a7e6aea1e9
|
ordination/__init__.py
|
ordination/__init__.py
|
from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
__version__ = '0.1-dev'
|
from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
# Compatible with PEP386
__version__ = '0.1.dev'
|
Make version compatible with PEP386
|
MAINT: Make version compatible with PEP386
|
Python
|
bsd-3-clause
|
xguse/scikit-bio,wdwvt1/scikit-bio,johnchase/scikit-bio,xguse/scikit-bio,colinbrislawn/scikit-bio,Achuth17/scikit-bio,Achuth17/scikit-bio,jdrudolph/scikit-bio,Kleptobismol/scikit-bio,jensreeder/scikit-bio,Jorge-C/bipy,jairideout/scikit-bio,kdmurray91/scikit-bio,averagehat/scikit-bio,wdwvt1/scikit-bio,Kleptobismol/scikit-bio,corburn/scikit-bio,anderspitman/scikit-bio,johnchase/scikit-bio,SamStudio8/scikit-bio,averagehat/scikit-bio,anderspitman/scikit-bio,gregcaporaso/scikit-bio,jensreeder/scikit-bio,corburn/scikit-bio,colinbrislawn/scikit-bio,demis001/scikit-bio,Kleptobismol/scikit-bio,jdrudolph/scikit-bio,jairideout/scikit-bio,kdmurray91/scikit-bio,gregcaporaso/scikit-bio,demis001/scikit-bio,SamStudio8/scikit-bio
|
from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
__version__ = '0.1-dev'
MAINT: Make version compatible with PEP386
|
from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
# Compatible with PEP386
__version__ = '0.1.dev'
|
<commit_before>from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
__version__ = '0.1-dev'
<commit_msg>MAINT: Make version compatible with PEP386<commit_after>
|
from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
# Compatible with PEP386
__version__ = '0.1.dev'
|
from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
__version__ = '0.1-dev'
MAINT: Make version compatible with PEP386from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
# Compatible with PEP386
__version__ = '0.1.dev'
|
<commit_before>from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
__version__ = '0.1-dev'
<commit_msg>MAINT: Make version compatible with PEP386<commit_after>from .base import CA, RDA, CCA
__all__ = ['CA', 'RDA', 'CCA']
#
#from numpy.testing import Tester
#test = Tester().test
# Compatible with PEP386
__version__ = '0.1.dev'
|
c5e047ff0e1cfe35692838365b907db8c3746c4b
|
doc/examples/plot_join_segmentations.py
|
doc/examples/plot_join_segmentations.py
|
"""
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as nd
import matplotlib.pyplot as plt
import matplotlib as mpl
from skimage.filter import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage import data
coins = data.coins()
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30] = background
markers[coins > 150] = foreground
ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
# make the RGB equivalent of `coins`
coins_colour = np.tile(coins[..., np.newaxis], (1, 1, 3))
seg2 = slic(coins_colour, max_iter=20, sigma=0, convert2lab=False)
# combine the two
segj = join_segmentations(seg1, seg2)
### Display the result ###
# make a random colormap for a set number of values
def random_cmap(im):
np.random.seed(9)
cmap_array = np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(np.ceil(im.max())), 3)))  # rand() needs an int count
return mpl.colors.ListedColormap(cmap_array)
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
axes[1].imshow(seg1, cmap=random_cmap(seg1), interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
axes[2].imshow(seg2, cmap=random_cmap(seg2), interpolation='nearest')
axes[2].set_title('SLIC superpixels')
axes[3].imshow(segj, cmap=random_cmap(segj), interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
|
Add join_segmentations example to the gallery
|
Add join_segmentations example to the gallery
|
Python
|
bsd-3-clause
|
Midafi/scikit-image,rjeli/scikit-image,youprofit/scikit-image,GaZ3ll3/scikit-image,keflavich/scikit-image,michaelaye/scikit-image,chintak/scikit-image,rjeli/scikit-image,youprofit/scikit-image,vighneshbirodkar/scikit-image,newville/scikit-image,almarklein/scikit-image,ClinicalGraphics/scikit-image,Britefury/scikit-image,Britefury/scikit-image,warmspringwinds/scikit-image,keflavich/scikit-image,SamHames/scikit-image,vighneshbirodkar/scikit-image,paalge/scikit-image,rjeli/scikit-image,GaZ3ll3/scikit-image,WarrenWeckesser/scikits-image,jwiggins/scikit-image,WarrenWeckesser/scikits-image,michaelpacer/scikit-image,paalge/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,juliusbierk/scikit-image,Midafi/scikit-image,bennlich/scikit-image,warmspringwinds/scikit-image,emon10005/scikit-image,vighneshbirodkar/scikit-image,bsipocz/scikit-image,almarklein/scikit-image,SamHames/scikit-image,pratapvardhan/scikit-image,chintak/scikit-image,pratapvardhan/scikit-image,chintak/scikit-image,newville/scikit-image,juliusbierk/scikit-image,ofgulban/scikit-image,blink1073/scikit-image,chriscrosscutler/scikit-image,bsipocz/scikit-image,bennlich/scikit-image,SamHames/scikit-image,paalge/scikit-image,SamHames/scikit-image,dpshelio/scikit-image,Hiyorimi/scikit-image,almarklein/scikit-image,dpshelio/scikit-image,jwiggins/scikit-image,robintw/scikit-image,ClinicalGraphics/scikit-image,ajaybhat/scikit-image,Hiyorimi/scikit-image,michaelpacer/scikit-image,ajaybhat/scikit-image,blink1073/scikit-image,oew1v07/scikit-image,michaelaye/scikit-image,emon10005/scikit-image,ofgulban/scikit-image,chintak/scikit-image,robintw/scikit-image,oew1v07/scikit-image,chriscrosscutler/scikit-image
|
Add join_segmentations example to the gallery
|
"""
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as nd
import matplotlib.pyplot as plt
import matplotlib as mpl
from skimage.filter import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage import data
coins = data.coins()
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30] = background
markers[coins > 150] = foreground
ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
# make the RGB equivalent of `coins`
coins_colour = np.tile(coins[..., np.newaxis], (1, 1, 3))
seg2 = slic(coins_colour, max_iter=20, sigma=0, convert2lab=False)
# combine the two
segj = join_segmentations(seg1, seg2)
### Display the result ###
# make a random colormap for a set number of values
def random_cmap(im):
np.random.seed(9)
cmap_array = np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(np.ceil(im.max())), 3)))  # rand() needs an int count
return mpl.colors.ListedColormap(cmap_array)
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
axes[1].imshow(seg1, cmap=random_cmap(seg1), interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
axes[2].imshow(seg2, cmap=random_cmap(seg2), interpolation='nearest')
axes[2].set_title('SLIC superpixels')
axes[3].imshow(segj, cmap=random_cmap(segj), interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
|
<commit_before><commit_msg>Add join_segmentations example to the gallery<commit_after>
|
"""
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as nd
import matplotlib.pyplot as plt
import matplotlib as mpl
from skimage.filter import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage import data
coins = data.coins()
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30] = background
markers[coins > 150] = foreground
ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
# make the RGB equivalent of `coins`
coins_colour = np.tile(coins[..., np.newaxis], (1, 1, 3))
seg2 = slic(coins_colour, max_iter=20, sigma=0, convert2lab=False)
# combine the two
segj = join_segmentations(seg1, seg2)
### Display the result ###
# make a random colormap for a set number of values
def random_cmap(im):
np.random.seed(9)
cmap_array = np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(np.ceil(im.max())), 3)))  # rand() needs an int count
return mpl.colors.ListedColormap(cmap_array)
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
axes[1].imshow(seg1, cmap=random_cmap(seg1), interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
axes[2].imshow(seg2, cmap=random_cmap(seg2), interpolation='nearest')
axes[2].set_title('SLIC superpixels')
axes[3].imshow(segj, cmap=random_cmap(segj), interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
|
Add join_segmentations example to the gallery"""
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as nd
import matplotlib.pyplot as plt
import matplotlib as mpl
from skimage.filter import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage import data
coins = data.coins()
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30] = background
markers[coins > 150] = foreground
ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
# make the RGB equivalent of `coins`
coins_colour = np.tile(coins[..., np.newaxis], (1, 1, 3))
seg2 = slic(coins_colour, max_iter=20, sigma=0, convert2lab=False)
# combine the two
segj = join_segmentations(seg1, seg2)
### Display the result ###
# make a random colormap for a set number of values
def random_cmap(im):
np.random.seed(9)
cmap_array = np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(np.ceil(im.max())), 3)))  # rand() needs an int count
return mpl.colors.ListedColormap(cmap_array)
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
axes[1].imshow(seg1, cmap=random_cmap(seg1), interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
axes[2].imshow(seg2, cmap=random_cmap(seg2), interpolation='nearest')
axes[2].set_title('SLIC superpixels')
axes[3].imshow(segj, cmap=random_cmap(segj), interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
|
<commit_before><commit_msg>Add join_segmentations example to the gallery<commit_after>"""
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as nd
import matplotlib.pyplot as plt
import matplotlib as mpl
from skimage.filter import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage import data
coins = data.coins()
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30] = background
markers[coins > 150] = foreground
ws = watershed(edges, markers)
seg1 = nd.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
# make the RGB equivalent of `coins`
coins_colour = np.tile(coins[..., np.newaxis], (1, 1, 3))
seg2 = slic(coins_colour, max_iter=20, sigma=0, convert2lab=False)
# combine the two
segj = join_segmentations(seg1, seg2)
### Display the result ###
# make a random colormap for a set number of values
def random_cmap(im):
np.random.seed(9)
cmap_array = np.concatenate(
        (np.zeros((1, 3)), np.random.rand(int(np.ceil(im.max())), 3)))  # rand() needs an int count
return mpl.colors.ListedColormap(cmap_array)
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
axes[1].imshow(seg1, cmap=random_cmap(seg1), interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
axes[2].imshow(seg2, cmap=random_cmap(seg2), interpolation='nearest')
axes[2].set_title('SLIC superpixels')
axes[3].imshow(segj, cmap=random_cmap(segj), interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
|
|
2b146388d1804ca4cb069fa07ea5e614a8ee1d14
|
tools/send_to_fcm.py
|
tools/send_to_fcm.py
|
import requests
url = 'https://fcm.googleapis.com/fcm/send'
headers = {'Content-Type': 'application/json',
'Authorization': 'key=AIza...(copy code here)...'}
payload = """
{
"to": "/topics/all",
"notification": {
"title": "Hello world",
"body": "You are beautiful"
}
}
"""
resp = requests.post(url, headers=headers, data=payload)
print(resp)
print(resp.text)
|
Add example of sending push notification via Firebase Cloud Messaging
|
Add example of sending push notification via Firebase Cloud Messaging
|
Python
|
apache-2.0
|
sth-larp/deus-mobile,sth-larp/deus-mobile,sth-larp/deus-mobile,sth-larp/deus-mobile,sth-larp/deus-mobile
|
Add example of sending push notification via Firebase Cloud Messaging
|
import requests
url = 'https://fcm.googleapis.com/fcm/send'
headers = {'Content-Type': 'application/json',
'Authorization': 'key=AIza...(copy code here)...'}
payload = """
{
"to": "/topics/all",
"notification": {
"title": "Hello world",
"body": "You are beautiful"
}
}
"""
resp = requests.post(url, headers=headers, data=payload)
print(resp)
print(resp.text)
|
<commit_before><commit_msg>Add example of sending push notification via Firebase Cloud Messaging<commit_after>
|
import requests
url = 'https://fcm.googleapis.com/fcm/send'
headers = {'Content-Type': 'application/json',
'Authorization': 'key=AIza...(copy code here)...'}
payload = """
{
"to": "/topics/all",
"notification": {
"title": "Hello world",
"body": "You are beautiful"
}
}
"""
resp = requests.post(url, headers=headers, data=payload)
print(resp)
print(resp.text)
|
Add example of sending push notification via Firebase Cloud Messagingimport requests
url = 'https://fcm.googleapis.com/fcm/send'
headers = {'Content-Type': 'application/json',
'Authorization': 'key=AIza...(copy code here)...'}
payload = """
{
"to": "/topics/all",
"notification": {
"title": "Hello world",
"body": "You are beautiful"
}
}
"""
resp = requests.post(url, headers=headers, data=payload)
print(resp)
print(resp.text)
|
<commit_before><commit_msg>Add example of sending push notification via Firebase Cloud Messaging<commit_after>import requests
url = 'https://fcm.googleapis.com/fcm/send'
headers = {'Content-Type': 'application/json',
'Authorization': 'key=AIza...(copy code here)...'}
payload = """
{
"to": "/topics/all",
"notification": {
"title": "Hello world",
"body": "You are beautiful"
}
}
"""
resp = requests.post(url, headers=headers, data=payload)
print(resp)
print(resp.text)
|
|
a3e9097247f4abe660696e5bd19f06e7e5756249
|
python/day9.py
|
python/day9.py
|
#!/usr/local/bin/python3
def parse_input(text):
"""Parse a list of destinations and weights
Returns a list of tuples (source, dest, weight).
    Edges in this graph are undirected.
The input contains multiple rows appearing like so:
A to B = W
Where A and B are strings and W is the weight to travel
between them in a graph.
"""
def parse_line(line):
"""Parse a single line of the input"""
parts = line.split()
return (parts[0], parts[2], int(parts[4]))
return [parse_line(line) for line in text.splitlines()]
def test_parse():
"""Test parsing of a list of destinations and weights"""
puzzle_input = '''\
London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141'''
result = parse_input(puzzle_input)
assert result == [
('London', 'Dublin', 464),
('London', 'Belfast', 518),
('Dublin', 'Belfast', 141)
]
|
Add start of Python solution for day 9 (parsing only)
|
Add start of Python solution for day 9 (parsing only)
|
Python
|
mit
|
robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions
|
Add start of Python solution for day 9 (parsing only)
|
#!/usr/local/bin/python3
def parse_input(text):
"""Parse a list of destinations and weights
Returns a list of tuples (source, dest, weight).
    Edges in this graph are undirected.
The input contains multiple rows appearing like so:
A to B = W
Where A and B are strings and W is the weight to travel
between them in a graph.
"""
def parse_line(line):
"""Parse a single line of the input"""
parts = line.split()
return (parts[0], parts[2], int(parts[4]))
return [parse_line(line) for line in text.splitlines()]
def test_parse():
"""Test parsing of a list of destinations and weights"""
puzzle_input = '''\
London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141'''
result = parse_input(puzzle_input)
assert result == [
('London', 'Dublin', 464),
('London', 'Belfast', 518),
('Dublin', 'Belfast', 141)
]
|
<commit_before><commit_msg>Add start of Python solution for day 9 (parsing only)<commit_after>
|
#!/usr/local/bin/python3
def parse_input(text):
"""Parse a list of destinations and weights
Returns a list of tuples (source, dest, weight).
    Edges in this graph are undirected.
The input contains multiple rows appearing like so:
A to B = W
Where A and B are strings and W is the weight to travel
between them in a graph.
"""
def parse_line(line):
"""Parse a single line of the input"""
parts = line.split()
return (parts[0], parts[2], int(parts[4]))
return [parse_line(line) for line in text.splitlines()]
def test_parse():
"""Test parsing of a list of destinations and weights"""
puzzle_input = '''\
London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141'''
result = parse_input(puzzle_input)
assert result == [
('London', 'Dublin', 464),
('London', 'Belfast', 518),
('Dublin', 'Belfast', 141)
]
|
Add start of Python solution for day 9 (parsing only)#!/usr/local/bin/python3
def parse_input(text):
"""Parse a list of destinations and weights
Returns a list of tuples (source, dest, weight).
    Edges in this graph are undirected.
The input contains multiple rows appearing like so:
A to B = W
Where A and B are strings and W is the weight to travel
between them in a graph.
"""
def parse_line(line):
"""Parse a single line of the input"""
parts = line.split()
return (parts[0], parts[2], int(parts[4]))
return [parse_line(line) for line in text.splitlines()]
def test_parse():
"""Test parsing of a list of destinations and weights"""
puzzle_input = '''\
London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141'''
result = parse_input(puzzle_input)
assert result == [
('London', 'Dublin', 464),
('London', 'Belfast', 518),
('Dublin', 'Belfast', 141)
]
|
<commit_before><commit_msg>Add start of Python solution for day 9 (parsing only)<commit_after>#!/usr/local/bin/python3
def parse_input(text):
"""Parse a list of destinations and weights
Returns a list of tuples (source, dest, weight).
    Edges in this graph are undirected.
The input contains multiple rows appearing like so:
A to B = W
Where A and B are strings and W is the weight to travel
between them in a graph.
"""
def parse_line(line):
"""Parse a single line of the input"""
parts = line.split()
return (parts[0], parts[2], int(parts[4]))
return [parse_line(line) for line in text.splitlines()]
def test_parse():
"""Test parsing of a list of destinations and weights"""
puzzle_input = '''\
London to Dublin = 464
London to Belfast = 518
Dublin to Belfast = 141'''
result = parse_input(puzzle_input)
assert result == [
('London', 'Dublin', 464),
('London', 'Belfast', 518),
('Dublin', 'Belfast', 141)
]
|
|
b95e5cd706a1cf81e41debae30422345cef3a1ee
|
tests/committerparser.py
|
tests/committerparser.py
|
#!/usr/bin/python
import sys
import getopt
import re
import email.utils
import datetime
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def parse_date(datestr):
d = email.utils.parsedate(datestr)
return datetime.datetime(d[0],d[1],d[2],d[3],d[4],d[5],d[6])
def parse_gitlog(filename=None):
results = {}
commits = {}
if not filename or filename == '-':
fh = sys.stdin
else:
fh = open(filename, 'r+')
commitcount = 0
for line in fh.readlines():
line = line.rstrip()
if line.startswith('commit '):
new_commit = True
commitcount += 1
continue
if line.startswith('Author:'):
            author = re.match(r'Author:\s+(.*)\s+<(.*)>', line)
if author:
email = author.group(2)
continue
if line.startswith('Date:'):
            isodate = re.match(r'Date:\s+(.*)', line)
d = parse_date(isodate.group(1))
continue
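        # a blank line ends a commit header: bucket the author's email by
        # year-month and credit the commits counted so far to that month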
if len(line) < 2 and new_commit:
new_commit = False
key = '{0}-{1}'.format(d.year, str(d.month).zfill(2))
if key not in results:
results[key] = []
if key not in commits:
commits[key] = 0
if email not in results[key]:
results[key].append(email)
commits[key] += commitcount
commitcount = 0
fh.close()
return (results, commits)
def count_results(results, commits):
result_str = ''
print('Date\tContributors\tCommits')
for k in sorted(results.iterkeys()):
result_str += '{0}\t{1}\t{2}'.format(k, len(results[k]), commits[k])
result_str += '\n'
return result_str
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h", ["help"])
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if len(opts) > 0:
if '-h' in opts[0] or '--help' in opts[0]:
print('committerparser.py [- | logfilename]')
print(' : Parse commit log from git and print number of commits and unique committers')
print(' : by month. Accepts a filename or reads from stdin.')
return 0
data, counts = parse_gitlog(filename=args[0])
print count_results(data, counts)
if __name__ == "__main__":
sys.exit(main())
|
Add simple parser to return some activity numbers from our git log.
|
Add simple parser to return some activity numbers from our git log.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add simple parser to return some activity numbers from our git log.
|
#!/usr/bin/python
import sys
import getopt
import re
import email.utils
import datetime
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def parse_date(datestr):
d = email.utils.parsedate(datestr)
return datetime.datetime(d[0],d[1],d[2],d[3],d[4],d[5],d[6])
def parse_gitlog(filename=None):
results = {}
commits = {}
if not filename or filename == '-':
fh = sys.stdin
else:
fh = open(filename, 'r+')
commitcount = 0
for line in fh.readlines():
line = line.rstrip()
if line.startswith('commit '):
new_commit = True
commitcount += 1
continue
if line.startswith('Author:'):
            author = re.match(r'Author:\s+(.*)\s+<(.*)>', line)
if author:
email = author.group(2)
continue
if line.startswith('Date:'):
            isodate = re.match(r'Date:\s+(.*)', line)
d = parse_date(isodate.group(1))
continue
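        # a blank line ends a commit header: bucket the author's email by
        # year-month and credit the commits counted so far to that month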
if len(line) < 2 and new_commit:
new_commit = False
key = '{0}-{1}'.format(d.year, str(d.month).zfill(2))
if key not in results:
results[key] = []
if key not in commits:
commits[key] = 0
if email not in results[key]:
results[key].append(email)
commits[key] += commitcount
commitcount = 0
fh.close()
return (results, commits)
def count_results(results, commits):
result_str = ''
print('Date\tContributors\tCommits')
for k in sorted(results.iterkeys()):
result_str += '{0}\t{1}\t{2}'.format(k, len(results[k]), commits[k])
result_str += '\n'
return result_str
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h", ["help"])
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if len(opts) > 0:
if '-h' in opts[0] or '--help' in opts[0]:
print('committerparser.py [- | logfilename]')
print(' : Parse commit log from git and print number of commits and unique committers')
print(' : by month. Accepts a filename or reads from stdin.')
return 0
data, counts = parse_gitlog(filename=args[0])
print count_results(data, counts)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add simple parser to return some activity numbers from our git log.<commit_after>
|
#!/usr/bin/python
import sys
import getopt
import re
import email.utils
import datetime
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def parse_date(datestr):
d = email.utils.parsedate(datestr)
return datetime.datetime(d[0],d[1],d[2],d[3],d[4],d[5],d[6])
def parse_gitlog(filename=None):
results = {}
commits = {}
if not filename or filename == '-':
fh = sys.stdin
else:
fh = open(filename, 'r+')
commitcount = 0
for line in fh.readlines():
line = line.rstrip()
if line.startswith('commit '):
new_commit = True
commitcount += 1
continue
if line.startswith('Author:'):
            author = re.match(r'Author:\s+(.*)\s+<(.*)>', line)
if author:
email = author.group(2)
continue
if line.startswith('Date:'):
            isodate = re.match(r'Date:\s+(.*)', line)
d = parse_date(isodate.group(1))
continue
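        # a blank line ends a commit header: bucket the author's email by
        # year-month and credit the commits counted so far to that month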
if len(line) < 2 and new_commit:
new_commit = False
key = '{0}-{1}'.format(d.year, str(d.month).zfill(2))
if key not in results:
results[key] = []
if key not in commits:
commits[key] = 0
if email not in results[key]:
results[key].append(email)
commits[key] += commitcount
commitcount = 0
fh.close()
return (results, commits)
def count_results(results, commits):
result_str = ''
print('Date\tContributors\tCommits')
for k in sorted(results.iterkeys()):
result_str += '{0}\t{1}\t{2}'.format(k, len(results[k]), commits[k])
result_str += '\n'
return result_str
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h", ["help"])
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if len(opts) > 0:
if '-h' in opts[0] or '--help' in opts[0]:
print('committerparser.py [- | logfilename]')
print(' : Parse commit log from git and print number of commits and unique committers')
print(' : by month. Accepts a filename or reads from stdin.')
return 0
data, counts = parse_gitlog(filename=args[0])
print count_results(data, counts)
if __name__ == "__main__":
sys.exit(main())
|
Add simple parser to return some activity numbers from our git log.#!/usr/bin/python
import sys
import getopt
import re
import email.utils
import datetime
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def parse_date(datestr):
d = email.utils.parsedate(datestr)
return datetime.datetime(d[0],d[1],d[2],d[3],d[4],d[5],d[6])
def parse_gitlog(filename=None):
results = {}
commits = {}
if not filename or filename == '-':
fh = sys.stdin
else:
fh = open(filename, 'r+')
commitcount = 0
for line in fh.readlines():
line = line.rstrip()
if line.startswith('commit '):
new_commit = True
commitcount += 1
continue
if line.startswith('Author:'):
            author = re.match(r'Author:\s+(.*)\s+<(.*)>', line)
if author:
email = author.group(2)
continue
if line.startswith('Date:'):
            isodate = re.match(r'Date:\s+(.*)', line)
d = parse_date(isodate.group(1))
continue
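        # a blank line ends a commit header: bucket the author's email by
        # year-month and credit the commits counted so far to that month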
if len(line) < 2 and new_commit:
new_commit = False
key = '{0}-{1}'.format(d.year, str(d.month).zfill(2))
if key not in results:
results[key] = []
if key not in commits:
commits[key] = 0
if email not in results[key]:
results[key].append(email)
commits[key] += commitcount
commitcount = 0
fh.close()
return (results, commits)
def count_results(results, commits):
result_str = ''
print('Date\tContributors\tCommits')
for k in sorted(results.iterkeys()):
result_str += '{0}\t{1}\t{2}'.format(k, len(results[k]), commits[k])
result_str += '\n'
return result_str
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h", ["help"])
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if len(opts) > 0:
if '-h' in opts[0] or '--help' in opts[0]:
print('committerparser.py [- | logfilename]')
print(' : Parse commit log from git and print number of commits and unique committers')
print(' : by month. Accepts a filename or reads from stdin.')
return 0
    data, counts = parse_gitlog(filename=args[0] if args else None)  # fall back to stdin when no file is given
print count_results(data, counts)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add simple parser to return some activity numbers from our git log.<commit_after>#!/usr/bin/python
import sys
import getopt
import re
import email.utils
import datetime
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def parse_date(datestr):
d = email.utils.parsedate(datestr)
    return datetime.datetime(*d[:6])  # d[6] is the weekday, not a microsecond value
def parse_gitlog(filename=None):
results = {}
commits = {}
if not filename or filename == '-':
fh = sys.stdin
else:
fh = open(filename, 'r+')
commitcount = 0
for line in fh.readlines():
line = line.rstrip()
if line.startswith('commit '):
new_commit = True
commitcount += 1
continue
if line.startswith('Author:'):
author = re.match('Author:\s+(.*)\s+<(.*)>', line)
if author:
email = author.group(2)
continue
if line.startswith('Date:'):
isodate = re.match('Date:\s+(.*)', line)
d = parse_date(isodate.group(1))
continue
if len(line) < 2 and new_commit:
new_commit = False
key = '{0}-{1}'.format(d.year, str(d.month).zfill(2))
if key not in results:
results[key] = []
if key not in commits:
commits[key] = 0
if email not in results[key]:
results[key].append(email)
commits[key] += commitcount
commitcount = 0
fh.close()
return (results, commits)
def count_results(results, commits):
result_str = ''
print('Date\tContributors\tCommits')
for k in sorted(results.iterkeys()):
result_str += '{0}\t{1}\t{2}'.format(k, len(results[k]), commits[k])
result_str += '\n'
return result_str
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "h", ["help"])
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
if len(opts) > 0:
if '-h' in opts[0] or '--help' in opts[0]:
print('committerparser.py [- | logfilename]')
print(' : Parse commit log from git and print number of commits and unique committers')
print(' : by month. Accepts a filename or reads from stdin.')
return 0
    data, counts = parse_gitlog(filename=args[0] if args else None)  # fall back to stdin when no file is given
print count_results(data, counts)
if __name__ == "__main__":
sys.exit(main())
|
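A quick way to exercise a parser like the one above is to feed it a synthetic log. A minimal sketch, assuming the script is saved as committerparser.py and run under Python 2 (the log text, names, and addresses below are made up; note the Date lines must be in the RFC 2822 style that email.utils.parsedate understands, e.g. the output of git log --date=rfc):

import subprocess
import tempfile

# Two commits by two authors in the same month -> expect a "2014-10\t2\t2" row.
fake_log = """commit 1111111111111111111111111111111111111111
Author: Alice Example <[email protected]>
Date: Mon, 06 Oct 2014 10:00:00 +0000

    first commit

commit 2222222222222222222222222222222222222222
Author: Bob Example <[email protected]>
Date: Tue, 07 Oct 2014 11:30:00 +0000

    second commit
"""

with tempfile.NamedTemporaryFile(mode='w', suffix='.log', delete=False) as fh:
    fh.write(fake_log)
    log_path = fh.name

# The script above uses Python 2 print statements, hence the interpreter choice.
subprocess.call(['python2', 'committerparser.py', log_path])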
|
3020b2084b24f1e00f0b9fc1d06186b1e697647e
|
tests/unit/utils/args.py
|
tests/unit/utils/args.py
|
# -*- coding: utf-8 -*-
# Import Salt Libs
from salt.utils import args
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ArgsTestCase(TestCase):
'''
TestCase for salt.utils.args module
'''
def test_condition_input_string(self):
'''
Test passing a jid on the command line
'''
cmd = args.condition_input(['*', 'foo.bar', 20141020201325675584], None)
self.assertIsInstance(cmd[2], str)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgsTestCase, needs_daemon=False)
|
Test long jid passed on CLI
|
Test long jid passed on CLI
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Test long jid passed on CLI
|
# -*- coding: utf-8 -*-
# Import Salt Libs
from salt.utils import args
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ArgsTestCase(TestCase):
'''
TestCase for salt.utils.args module
'''
def test_condition_input_string(self):
'''
Test passing a jid on the command line
'''
cmd = args.condition_input(['*', 'foo.bar', 20141020201325675584], None)
self.assertIsInstance(cmd[2], str)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgsTestCase, needs_daemon=False)
|
<commit_before><commit_msg>Test long jid passed on CLI<commit_after>
|
# -*- coding: utf-8 -*-
# Import Salt Libs
from salt.utils import args
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ArgsTestCase(TestCase):
'''
TestCase for salt.utils.args module
'''
def test_condition_input_string(self):
'''
Test passing a jid on the command line
'''
cmd = args.condition_input(['*', 'foo.bar', 20141020201325675584], None)
self.assertIsInstance(cmd[2], str)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgsTestCase, needs_daemon=False)
|
Test long jid passed on CLI# -*- coding: utf-8 -*-
# Import Salt Libs
from salt.utils import args
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ArgsTestCase(TestCase):
'''
TestCase for salt.utils.args module
'''
def test_condition_input_string(self):
'''
Test passing a jid on the command line
'''
cmd = args.condition_input(['*', 'foo.bar', 20141020201325675584], None)
self.assertIsInstance(cmd[2], str)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgsTestCase, needs_daemon=False)
|
<commit_before><commit_msg>Test long jid passed on CLI<commit_after># -*- coding: utf-8 -*-
# Import Salt Libs
from salt.utils import args
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ArgsTestCase(TestCase):
'''
TestCase for salt.utils.args module
'''
def test_condition_input_string(self):
'''
Test passing a jid on the command line
'''
cmd = args.condition_input(['*', 'foo.bar', 20141020201325675584], None)
self.assertIsInstance(cmd[2], str)
if __name__ == '__main__':
from integration import run_tests
run_tests(ArgsTestCase, needs_daemon=False)
|
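For readers unfamiliar with condition_input, a simplified sketch of the behaviour this test pins down -- not Salt's actual implementation -- is:

def condition_input_sketch(args):
    # A 20-digit jid is numeric on the CLI; coerce numeric positional
    # arguments to str so they survive serialization unchanged.
    return [str(arg) if isinstance(arg, int) else arg for arg in args]

assert condition_input_sketch(['*', 'foo.bar', 20141020201325675584])[2] == '20141020201325675584'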
|
8f597e766e9ef8014da4391a7109d9b77daf127e
|
tests/user_utils_test.py
|
tests/user_utils_test.py
|
"""Tests for user utility functions."""
from drudge import Vec, sum_, prod_
from drudge.term import parse_terms
def test_sum_prod_utility():
"""Test the summation and product utility."""
v = Vec('v')
vecs = [v[i] for i in range(3)]
v0, v1, v2 = vecs
# The proxy object cannot be directly compared.
assert parse_terms(sum_(vecs)) == parse_terms(v0 + v1 + v2)
assert parse_terms(prod_(vecs)) == parse_terms(v0 * v1 * v2)
assert sum_([]) == 0
assert prod_([]) == 1
|
Add tests for user utilities sum_ and prod_
|
Add tests for user utilities sum_ and prod_
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add tests for user utilities sum_ and prod_
|
"""Tests for user utility functions."""
from drudge import Vec, sum_, prod_
from drudge.term import parse_terms
def test_sum_prod_utility():
"""Test the summation and product utility."""
v = Vec('v')
vecs = [v[i] for i in range(3)]
v0, v1, v2 = vecs
# The proxy object cannot be directly compared.
assert parse_terms(sum_(vecs)) == parse_terms(v0 + v1 + v2)
assert parse_terms(prod_(vecs)) == parse_terms(v0 * v1 * v2)
assert sum_([]) == 0
assert prod_([]) == 1
|
<commit_before><commit_msg>Add tests for user utilities sum_ and prod_<commit_after>
|
"""Tests for user utility functions."""
from drudge import Vec, sum_, prod_
from drudge.term import parse_terms
def test_sum_prod_utility():
"""Test the summation and product utility."""
v = Vec('v')
vecs = [v[i] for i in range(3)]
v0, v1, v2 = vecs
# The proxy object cannot be directly compared.
assert parse_terms(sum_(vecs)) == parse_terms(v0 + v1 + v2)
assert parse_terms(prod_(vecs)) == parse_terms(v0 * v1 * v2)
assert sum_([]) == 0
assert prod_([]) == 1
|
Add tests for user utilities sum_ and prod_"""Tests for user utility functions."""
from drudge import Vec, sum_, prod_
from drudge.term import parse_terms
def test_sum_prod_utility():
"""Test the summation and product utility."""
v = Vec('v')
vecs = [v[i] for i in range(3)]
v0, v1, v2 = vecs
# The proxy object cannot be directly compared.
assert parse_terms(sum_(vecs)) == parse_terms(v0 + v1 + v2)
assert parse_terms(prod_(vecs)) == parse_terms(v0 * v1 * v2)
assert sum_([]) == 0
assert prod_([]) == 1
|
<commit_before><commit_msg>Add tests for user utilities sum_ and prod_<commit_after>"""Tests for user utility functions."""
from drudge import Vec, sum_, prod_
from drudge.term import parse_terms
def test_sum_prod_utility():
"""Test the summation and product utility."""
v = Vec('v')
vecs = [v[i] for i in range(3)]
v0, v1, v2 = vecs
# The proxy object cannot be directly compared.
assert parse_terms(sum_(vecs)) == parse_terms(v0 + v1 + v2)
assert parse_terms(prod_(vecs)) == parse_terms(v0 * v1 * v2)
assert sum_([]) == 0
assert prod_([]) == 1
|
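Utilities with this contract are conventionally thin wrappers over functools.reduce; a generic sketch under that assumption (drudge's real versions operate on its own expression proxies, which is why the test compares parsed terms rather than the proxies directly):

import functools
import operator

def sum_sketch(items, start=0):
    # Empty input falls back to the additive identity, matching sum_([]) == 0.
    return functools.reduce(operator.add, items, start)

def prod_sketch(items, start=1):
    # Empty input falls back to the multiplicative identity, matching prod_([]) == 1.
    return functools.reduce(operator.mul, items, start)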
|
06c3a417f0270d76a7fcc9e94fdb40f9952b9d12
|
src/wirecloud/fiware/views.py
|
src/wirecloud/fiware/views.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def login(request):
if request.user.is_authenticated():
url = request.GET.get(REDIRECT_FIELD_NAME, '/')
else:
url = reverse('socialauth_begin', kwargs={'backend': 'fiware'}) + '?' + request.GET.urlencode()
return HttpResponseRedirect(url)
|
Add a login view that automatically starts the oauth2 flow for authenticating using the IdM server
|
Add a login view that automatically starts the oauth2 flow for authenticating using the IdM server
|
Python
|
agpl-3.0
|
jpajuelo/wirecloud,rockneurotiko/wirecloud,rockneurotiko/wirecloud,jpajuelo/wirecloud,rockneurotiko/wirecloud,jpajuelo/wirecloud,jpajuelo/wirecloud,rockneurotiko/wirecloud
|
Add a login view that automatically starts the oauth2 flow for authenticating using the IdM server
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def login(request):
if request.user.is_authenticated():
url = request.GET.get(REDIRECT_FIELD_NAME, '/')
else:
url = reverse('socialauth_begin', kwargs={'backend': 'fiware'}) + '?' + request.GET.urlencode()
return HttpResponseRedirect(url)
|
<commit_before><commit_msg>Add a login view that automatically starts the oauth2 flow for authenticating using the IdM server<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def login(request):
if request.user.is_authenticated():
url = request.GET.get(REDIRECT_FIELD_NAME, '/')
else:
url = reverse('socialauth_begin', kwargs={'backend': 'fiware'}) + '?' + request.GET.urlencode()
return HttpResponseRedirect(url)
|
Add a login view that automatically starts the oauth2 flow for authenticating using the IdM server# -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def login(request):
if request.user.is_authenticated():
url = request.GET.get(REDIRECT_FIELD_NAME, '/')
else:
url = reverse('socialauth_begin', kwargs={'backend': 'fiware'}) + '?' + request.GET.urlencode()
return HttpResponseRedirect(url)
|
<commit_before><commit_msg>Add a login view that automatically starts the oauth2 flow for authenticating using the IdM server<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def login(request):
if request.user.is_authenticated():
url = request.GET.get(REDIRECT_FIELD_NAME, '/')
else:
url = reverse('socialauth_begin', kwargs={'backend': 'fiware'}) + '?' + request.GET.urlencode()
return HttpResponseRedirect(url)
|
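For illustration, a view like this would typically be exposed through a URLconf entry; a hypothetical fragment (the pattern and name below are assumptions, not taken from Wirecloud, and the list-style urlpatterns shown is the Django >= 1.8 form -- the 2013-era project would have used patterns()):

from django.conf.urls import url

from wirecloud.fiware import views

urlpatterns = [
    url(r'^login/?$', views.login, name='fiware_login'),
]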
|
c51bb87714ade403aeabc9b4b4c62b4ee3a7a8c5
|
scripts/test-scrobble.py
|
scripts/test-scrobble.py
|
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
Add test script for checking to see if scrobbling works on new installs
|
Add test script for checking to see if scrobbling works on new installs
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add test script for checking to see if scrobbling works on new installs
|
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
<commit_before><commit_msg>Add test script for checking to see if scrobbling works on new installs<commit_after>
|
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
Add test script for checking to see if scrobbling works on new installs#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
<commit_before><commit_msg>Add test script for checking to see if scrobbling works on new installs<commit_after>#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
|
97b9ee00277fa35c92886b1ed39864eba3707dce
|
bluebottle/activities/migrations/0020_auto_20200224_1005.py
|
bluebottle/activities/migrations/0020_auto_20200224_1005.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_organizer', 'add_organizer',
'delete_organizer',
)
},
}
update_group_permissions('activities', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('activities', '0019_merge_20200213_1038'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add organizer permissions to staff group
|
Add organizer permissions to staff group
BB-16555 #resolve
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add organizer permissions to staff group
BB-16555 #resolve
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_organizer', 'add_organizer',
'delete_organizer',
)
},
}
update_group_permissions('activities', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('activities', '0019_merge_20200213_1038'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add organizer permissions to staff group
BB-16555 #resolve<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_organizer', 'add_organizer',
'delete_organizer',
)
},
}
update_group_permissions('activities', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('activities', '0019_merge_20200213_1038'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
Add organizer permissions to staff group
BB-16555 #resolve# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_organizer', 'add_organizer',
'delete_organizer',
)
},
}
update_group_permissions('activities', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('activities', '0019_merge_20200213_1038'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
<commit_before><commit_msg>Add organizer permissions to staff group
BB-16555 #resolve<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
group_perms = {
'Staff': {
'perms': (
'change_organizer', 'add_organizer',
'delete_organizer',
)
},
}
update_group_permissions('activities', group_perms, apps)
class Migration(migrations.Migration):
dependencies = [
('activities', '0019_merge_20200213_1038'),
]
operations = [
migrations.RunPython(add_group_permissions)
]
|
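One detail worth noting: as written, RunPython has no reverse callable, so the migration cannot be rolled back. A sketch of a reversible variant using Django's built-in no-op, reusing the add_group_permissions helper defined above (a suggestion, not part of the original commit):

from django.db import migrations

# RunPython.noop (Django >= 1.8) lets `migrate` reverse past this step
# without attempting to revoke the granted permissions.
operations = [
    migrations.RunPython(add_group_permissions, migrations.RunPython.noop),
]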
|
efbf98235b82c954364f35cb09f63006e23346e2
|
tests/test_lang_javascript.py
|
tests/test_lang_javascript.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility.language import Language
from sensibility.language.javascript import javascript
from sensibility.token_utils import Position
from location_factory import LocationFactory
test_file = r"""#!/usr/bin/env node
/*!
* This is an example file.
*/
import {ಠ_ಠ} from "-_-";
/* TODO: crazy ES2017 features. */
"""
def test_sanity_check() -> None:
assert isinstance(javascript, Language)
def test_tokenize() -> None:
tokens = javascript.tokenize(test_file)
# TODO: more robust tests for this.
assert len(tokens) == 7
def test_summarize() -> None:
with pytest.raises(SyntaxError):
javascript.summarize('import #')
summary = javascript.summarize(test_file)
assert summary.sloc == 1
assert summary.n_tokens == 7
def test_pipeline() -> None:
loc = LocationFactory(Position(line=6, column=0))
result = list(javascript.pipeline.execute_with_locations(test_file))
assert result[:4] == [
(loc.across(len("import")), 'IMPORT'),
(loc.space().across(1), '{'),
(loc.space().across(len("ಠ_ಠ")), 'IDENTIFIER'),
(loc.space().across(1), '}'),
]
# TODO: Test more locations?
|
Create tests for JavaScript parser.
|
Create tests for JavaScript parser.
|
Python
|
apache-2.0
|
eddieantonio/ad-hoc-miner,naturalness/sensibility,naturalness/sensibility,eddieantonio/ad-hoc-miner,naturalness/sensibility,eddieantonio/ad-hoc-miner,eddieantonio/ad-hoc-miner,naturalness/sensibility
|
Create tests for JavaScript parser.
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility.language import Language
from sensibility.language.javascript import javascript
from sensibility.token_utils import Position
from location_factory import LocationFactory
test_file = r"""#!/usr/bin/env node
/*!
* This is an example file.
*/
import {ಠ_ಠ} from "-_-";
/* TODO: crazy ES2017 features. */
"""
def test_sanity_check() -> None:
assert isinstance(javascript, Language)
def test_tokenize() -> None:
tokens = javascript.tokenize(test_file)
# TODO: more robust tests for this.
assert len(tokens) == 7
def test_summarize() -> None:
with pytest.raises(SyntaxError):
javascript.summarize('import #')
summary = javascript.summarize(test_file)
assert summary.sloc == 1
assert summary.n_tokens == 7
def test_pipeline() -> None:
loc = LocationFactory(Position(line=6, column=0))
result = list(javascript.pipeline.execute_with_locations(test_file))
assert result[:4] == [
(loc.across(len("import")), 'IMPORT'),
(loc.space().across(1), '{'),
(loc.space().across(len("ಠ_ಠ")), 'IDENTIFIER'),
(loc.space().across(1), '}'),
]
# TODO: Test more locations?
|
<commit_before><commit_msg>Create tests for JavaScript parser.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility.language import Language
from sensibility.language.javascript import javascript
from sensibility.token_utils import Position
from location_factory import LocationFactory
test_file = r"""#!/usr/bin/env node
/*!
* This is an example file.
*/
import {ಠ_ಠ} from "-_-";
/* TODO: crazy ES2017 features. */
"""
def test_sanity_check() -> None:
assert isinstance(javascript, Language)
def test_tokenize() -> None:
tokens = javascript.tokenize(test_file)
# TODO: more robust tests for this.
assert len(tokens) == 7
def test_summarize() -> None:
with pytest.raises(SyntaxError):
javascript.summarize('import #')
summary = javascript.summarize(test_file)
assert summary.sloc == 1
assert summary.n_tokens == 7
def test_pipeline() -> None:
loc = LocationFactory(Position(line=6, column=0))
result = list(javascript.pipeline.execute_with_locations(test_file))
assert result[:4] == [
(loc.across(len("import")), 'IMPORT'),
(loc.space().across(1), '{'),
(loc.space().across(len("ಠ_ಠ")), 'IDENTIFIER'),
(loc.space().across(1), '}'),
]
# TODO: Test more locations?
|
Create tests for JavaScript parser.#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility.language import Language
from sensibility.language.javascript import javascript
from sensibility.token_utils import Position
from location_factory import LocationFactory
test_file = r"""#!/usr/bin/env node
/*!
* This is an example file.
*/
import {ಠ_ಠ} from "-_-";
/* TODO: crazy ES2017 features. */
"""
def test_sanity_check() -> None:
assert isinstance(javascript, Language)
def test_tokenize() -> None:
tokens = javascript.tokenize(test_file)
# TODO: more robust tests for this.
assert len(tokens) == 7
def test_summarize() -> None:
with pytest.raises(SyntaxError):
javascript.summarize('import #')
summary = javascript.summarize(test_file)
assert summary.sloc == 1
assert summary.n_tokens == 7
def test_pipeline() -> None:
loc = LocationFactory(Position(line=6, column=0))
result = list(javascript.pipeline.execute_with_locations(test_file))
assert result[:4] == [
(loc.across(len("import")), 'IMPORT'),
(loc.space().across(1), '{'),
(loc.space().across(len("ಠ_ಠ")), 'IDENTIFIER'),
(loc.space().across(1), '}'),
]
# TODO: Test more locations?
|
<commit_before><commit_msg>Create tests for JavaScript parser.<commit_after>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility.language import Language
from sensibility.language.javascript import javascript
from sensibility.token_utils import Position
from location_factory import LocationFactory
test_file = r"""#!/usr/bin/env node
/*!
* This is an example file.
*/
import {ಠ_ಠ} from "-_-";
/* TODO: crazy ES2017 features. */
"""
def test_sanity_check() -> None:
assert isinstance(javascript, Language)
def test_tokenize() -> None:
tokens = javascript.tokenize(test_file)
# TODO: more robust tests for this.
assert len(tokens) == 7
def test_summarize() -> None:
with pytest.raises(SyntaxError):
javascript.summarize('import #')
summary = javascript.summarize(test_file)
assert summary.sloc == 1
assert summary.n_tokens == 7
def test_pipeline() -> None:
loc = LocationFactory(Position(line=6, column=0))
result = list(javascript.pipeline.execute_with_locations(test_file))
assert result[:4] == [
(loc.across(len("import")), 'IMPORT'),
(loc.space().across(1), '{'),
(loc.space().across(len("ಠ_ಠ")), 'IDENTIFIER'),
(loc.space().across(1), '}'),
]
# TODO: Test more locations?
|
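The LocationFactory helper is only shown by usage above; a rough reconstruction inferred from that usage (not the project's actual implementation) behaves like a cursor that emits spans:

from collections import namedtuple

Position = namedtuple('Position', 'line column')
Span = namedtuple('Span', 'start end')

class LocationFactorySketch(object):
    """Cursor over a single line: space() skips columns, across() emits a span."""
    def __init__(self, start):
        self._pos = start

    def space(self, width=1):
        self._pos = Position(self._pos.line, self._pos.column + width)
        return self

    def across(self, width):
        start = self._pos
        self._pos = Position(start.line, start.column + width)
        return Span(start, self._pos)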
|
09ee7c5972f3a508355f6dfd49ff05d8de482cd9
|
shs_example.py
|
shs_example.py
|
import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.RateState()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.b = 0.01 # Empirical coefficient for the evolution effect
model.dc = 10. # Critical slip distance
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 10. # Initial slider velocity, generally is vlp(t=0)
model.vref = 10. # Reference velocity, generally vlp(t=0)
model.stateLaw = model.dieterichState # Which state relation we want to use
# We want to solve for 150 seconds at 100 Hz
model.model_time = np.arange(0,150.01,0.01)
# We want to slide at 10 um/s for 20 s, hold for 100 s, then slide again
lp_velocity = 10* np.ones_like(model.model_time)
lp_velocity[20*100:120*100] = 0.  # Hold (v = 0) from t = 20 s to t = 120 s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
results = model.solve()
# Make the phase plot
model.phasePlot()
# Make a plot in displacement
model.dispPlot()
# Make a plot in time
model.timePlot()
|
Add example of slide-hold-slide test
|
Add example of slide-hold-slide test
|
Python
|
mit
|
jrleeman/rsfmodel
|
Add example of slide-hold-slide test
|
import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.RateState()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.b = 0.01 # Empirical coefficient for the evolution effect
model.dc = 10. # Critical slip distance
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 10. # Initial slider velocity, generally is vlp(t=0)
model.vref = 10. # Reference velocity, generally vlp(t=0)
model.stateLaw = model.dieterichState # Which state relation we want to use
# We want to solve for 150 seconds at 100 Hz
model.model_time = np.arange(0,150.01,0.01)
# We want to slide at 10 um/s for 20 s, hold for 100 s, then slide again
lp_velocity = 10* np.ones_like(model.model_time)
lp_velocity[20*100:120*100] = 0.  # Hold (v = 0) from t = 20 s to t = 120 s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
results = model.solve()
# Make the phase plot
model.phasePlot()
# Make a plot in displacement
model.dispPlot()
# Make a plot in time
model.timePlot()
|
<commit_before><commit_msg>Add example of slide-hold-slide test<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.RateState()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.b = 0.01 # Empirical coefficient for the evolution effect
model.dc = 10. # Critical slip distance
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 10. # Initial slider velocity, generally is vlp(t=0)
model.vref = 10. # Reference velocity, generally vlp(t=0)
model.stateLaw = model.dieterichState # Which state relation we want to use
# We want to solve for 150 seconds at 100 Hz
model.model_time = np.arange(0,150.01,0.01)
# We want to slide at 10 um/s for 20 s, hold for 100 s, then slide again
lp_velocity = 10* np.ones_like(model.model_time)
lp_velocity[20*100:120*100] = 0.  # Hold (v = 0) from t = 20 s to t = 120 s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
results = model.solve()
# Make the phase plot
model.phasePlot()
# Make a plot in displacement
model.dispPlot()
# Make a plot in time
model.timePlot()
|
Add example of slide-hold-slide testimport numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.RateState()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.b = 0.01 # Empirical coefficient for the evolution effect
model.dc = 10. # Critical slip distance
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 10. # Initial slider velocity, generally is vlp(t=0)
model.vref = 10. # Reference velocity, generally vlp(t=0)
model.stateLaw = model.dieterichState # Which state relation we want to use
# We want to solve for 150 seconds at 100 Hz
model.model_time = np.arange(0,150.01,0.01)
# We want to slide at 10 um/s for 20 s, hold for 100 s, then slide again
lp_velocity = 10* np.ones_like(model.model_time)
lp_velocity[20*100:120*100] = 0.  # Hold (v = 0) from t = 20 s to t = 120 s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
results = model.solve()
# Make the phase plot
model.phasePlot()
# Make a plot in displacement
model.dispPlot()
# Make a plot in time
model.timePlot()
|
<commit_before><commit_msg>Add example of slide-hold-slide test<commit_after>import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.RateState()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.b = 0.01 # Empirical coefficient for the evolution effect
model.dc = 10. # Critical slip distance
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 10. # Initial slider velocity, generally is vlp(t=0)
model.vref = 10. # Reference velocity, generally vlp(t=0)
model.stateLaw = model.dieterichState # Which state relation we want to use
# We want to solve for 150 seconds at 100 Hz
model.model_time = np.arange(0,150.01,0.01)
# We want to slide at 10 um/s for 20 s, hold for 100 s, then slide again
lp_velocity = 10* np.ones_like(model.model_time)
lp_velocity[20*100:120*100] = 0.  # Hold (v = 0) from t = 20 s to t = 120 s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
results = model.solve()
# Make the phase plot
model.phasePlot()
# Make a plot in displacement
model.dispPlot()
# Make a plot in time
model.timePlot()
|
|
4f7b103d6c5fa3b07abb23e346caa995a7f803ef
|
tests/completion.py
|
tests/completion.py
|
from _utils import _output_eq, IntegrationSpec
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
def top_level_with_dash_means_core_options(self):
_output_eq('--complete -- -', "--lol\n--wut")
|
import sys
from nose.tools import ok_
from _utils import _output_eq, IntegrationSpec, _dispatch, trap, expect_exit
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
@trap
def top_level_with_dash_means_core_options(self):
with expect_exit(0):
_dispatch('inv --complete -- -')
output = sys.stdout.getvalue()
# No point mirroring all core options, just spot check a few
for flag in ('--no-dedupe', '-d', '--debug', '-V', '--version'):
ok_(flag in output)
|
Make new test fail correctlyish
|
Make new test fail correctlyish
|
Python
|
bsd-2-clause
|
mkusz/invoke,kejbaly2/invoke,mkusz/invoke,pyinvoke/invoke,mattrobenolt/invoke,kejbaly2/invoke,tyewang/invoke,frol/invoke,singingwolfboy/invoke,pyinvoke/invoke,frol/invoke,pfmoore/invoke,pfmoore/invoke,mattrobenolt/invoke
|
from _utils import _output_eq, IntegrationSpec
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
def top_level_with_dash_means_core_options(self):
_output_eq('--complete -- -', "--lol\n--wut")
Make new test fail correctlyish
|
import sys
from nose.tools import ok_
from _utils import _output_eq, IntegrationSpec, _dispatch, trap, expect_exit
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
@trap
def top_level_with_dash_means_core_options(self):
with expect_exit(0):
_dispatch('inv --complete -- -')
output = sys.stdout.getvalue()
# No point mirroring all core options, just spot check a few
for flag in ('--no-dedupe', '-d', '--debug', '-V', '--version'):
ok_(flag in output)
|
<commit_before>from _utils import _output_eq, IntegrationSpec
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
def top_level_with_dash_means_core_options(self):
_output_eq('--complete -- -', "--lol\n--wut")
<commit_msg>Make new test fail correctlyish<commit_after>
|
import sys
from nose.tools import ok_
from _utils import _output_eq, IntegrationSpec, _dispatch, trap, expect_exit
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
@trap
def top_level_with_dash_means_core_options(self):
with expect_exit(0):
_dispatch('inv --complete -- -')
output = sys.stdout.getvalue()
# No point mirroring all core options, just spot check a few
for flag in ('--no-dedupe', '-d', '--debug', '-V', '--version'):
ok_(flag in output)
|
from _utils import _output_eq, IntegrationSpec
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
def top_level_with_dash_means_core_options(self):
_output_eq('--complete -- -', "--lol\n--wut")
Make new test fail correctlyishimport sys
from nose.tools import ok_
from _utils import _output_eq, IntegrationSpec, _dispatch, trap, expect_exit
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
@trap
def top_level_with_dash_means_core_options(self):
with expect_exit(0):
_dispatch('inv --complete -- -')
output = sys.stdout.getvalue()
# No point mirroring all core options, just spot check a few
for flag in ('--no-dedupe', '-d', '--debug', '-V', '--version'):
ok_(flag in output)
|
<commit_before>from _utils import _output_eq, IntegrationSpec
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
def top_level_with_dash_means_core_options(self):
_output_eq('--complete -- -', "--lol\n--wut")
<commit_msg>Make new test fail correctlyish<commit_after>import sys
from nose.tools import ok_
from _utils import _output_eq, IntegrationSpec, _dispatch, trap, expect_exit
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
@trap
def top_level_with_dash_means_core_options(self):
with expect_exit(0):
_dispatch('inv --complete -- -')
output = sys.stdout.getvalue()
# No point mirroring all core options, just spot check a few
for flag in ('--no-dedupe', '-d', '--debug', '-V', '--version'):
ok_(flag in output)
|
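The @trap decorator above captures sys.stdout via the suite's test helpers; the same idea in modern-stdlib form, for readers who want the pattern outside this test suite (a generic sketch, not invoke's helpers):

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('--debug')  # stand-in for the completion output under test
assert '--debug' in buf.getvalue()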
661943403b9a4b7c28bf9e0a59ba937dc2298fef
|
netmiko/ssh_autodetect.py
|
netmiko/ssh_autodetect.py
|
"""
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
"""
from netmiko.ssh_dispatcher import CLASS_MAPPER_BASE, ConnectHandler
SSH_MAPPER_BASE = {}
for k, v in CLASS_MAPPER_BASE.iteritems():
if getattr(v, "autodetect", None):
SSH_MAPPER_BASE[k] = v
class SSHDetect(object):
"""
The SSHDetect class tries to automatically guess the device type running on the SSH remote end.
Parameters
----------
*args : list
The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
*kwargs : dict
The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
Attributes
----------
connection : netmiko.terminal_server.TerminalServer
A basic connection to the remote SSH end.
potential_matches: dict
Dict of (device type, accuracy) that is populated through interaction with the remote end.
Methods
-------
autodetect()
Try to determine the device type.
"""
def __init__(self, *args, **kwargs):
"""
Constructor of the SSHDetect class
"""
self.connection = ConnectHandler(*args, **kwargs)
if self.connection.device_type != "terminal_server":
self.connection.disconnect()
raise ValueError("The connection device_type must be of 'terminal_server'")
self.potential_matches = {}
def autodetect(self):
"""
Try to guess the best 'device_type' based on each device_type autodetect method.
Returns
-------
best_match : str or None
The device type that is currently the best to use to interact with the device
"""
for k, v in SSH_MAPPER_BASE.items():
try:
accuracy = v.autodetect(self.connection)
self.potential_matches[k] = accuracy
except:
pass
if not self.potential_matches:
self.connection.disconnect()
return None
best_match = sorted(self.potential_matches.items(), key=lambda t: t[1], reverse=True)  # highest accuracy first
self.connection.disconnect()
return best_match[0][0]
|
Add SSH auto detect feature
|
Add SSH auto detect feature
|
Python
|
mit
|
isidroamv/netmiko,ktbyers/netmiko,fooelisa/netmiko,ktbyers/netmiko,isidroamv/netmiko,fooelisa/netmiko
|
Add SSH auto detect feature
|
"""
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
"""
from netmiko.ssh_dispatcher import CLASS_MAPPER_BASE, ConnectHandler
SSH_MAPPER_BASE = {}
for k, v in CLASS_MAPPER_BASE.iteritems():
if getattr(v, "autodetect", None):
SSH_MAPPER_BASE[k] = v
class SSHDetect(object):
"""
The SSHDetect class tries to automatically guess the device type running on the SSH remote end.
Parameters
----------
*args : list
The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
*kwargs : dict
The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
Attributes
----------
connection : netmiko.terminal_server.TerminalServer
A basic connection to the remote SSH end.
potential_matches: dict
Dict of (device type, accuracy) that is populated through interaction with the remote end.
Methods
-------
autodetect()
Try to determine the device type.
"""
def __init__(self, *args, **kwargs):
"""
Constructor of the SSHDetect class
"""
self.connection = ConnectHandler(*args, **kwargs)
if self.connection.device_type != "terminal_server":
self.connection.disconnect()
raise ValueError("The connection device_type must be of 'terminal_server'")
self.potential_matches = {}
def autodetect(self):
"""
Try to guess the best 'device_type' based on each device_type autodetect method.
Returns
-------
best_match : str or None
The device type that is currently the best to use to interact with the device
"""
for k, v in SSH_MAPPER_BASE.items():
try:
accuracy = v.autodetect(self.connection)
self.potential_matches[k] = accuracy
except:
pass
if not self.potential_matches:
self.connection.disconnect()
return None
best_match = sorted(self.potential_matches.items(), key=lambda t: t[1], reverse=True)  # highest accuracy first
self.connection.disconnect()
return best_match[0][0]
|
<commit_before><commit_msg>Add SSH auto detect feature<commit_after>
|
"""
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
"""
from netmiko.ssh_dispatcher import CLASS_MAPPER_BASE, ConnectHandler
SSH_MAPPER_BASE = {}
for k, v in CLASS_MAPPER_BASE.iteritems():
if getattr(v, "autodetect", None):
SSH_MAPPER_BASE[k] = v
class SSHDetect(object):
"""
The SSHDetect class tries to automatically guess the device type running on the SSH remote end.
Parameters
----------
*args : list
The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
*kwargs : dict
The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
Attributes
----------
connection : netmiko.terminal_server.TerminalServer
A basic connection to the remote SSH end.
potential_matches: dict
Dict of (device type, accuracy) that is populated through interaction with the remote end.
Methods
-------
autodetect()
Try to determine the device type.
"""
def __init__(self, *args, **kwargs):
"""
Constructor of the SSHDetect class
"""
self.connection = ConnectHandler(*args, **kwargs)
if self.connection.device_type != "terminal_server":
self.connection.disconnect()
raise ValueError("The connection device_type must be of 'terminal_server'")
self.potential_matches = {}
def autodetect(self):
"""
Try to guess the best 'device_type' based on each device_type autodetect method.
Returns
-------
best_match : str or None
The device type that is currently the best to use to interact with the device
"""
for k, v in SSH_MAPPER_BASE.items():
try:
accuracy = v.autodetect(self.connection)
self.potential_matches[k] = accuracy
except:
pass
if not self.potential_matches:
self.connection.disconnect()
return None
best_match = sorted(self.potential_matches.items(), key=lambda t: t[1], reverse=True)  # highest accuracy first
self.connection.disconnect()
return best_match[0][0]
|
Add SSH auto detect feature"""
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
"""
from netmiko.ssh_dispatcher import CLASS_MAPPER_BASE, ConnectHandler
SSH_MAPPER_BASE = {}
for k, v in CLASS_MAPPER_BASE.iteritems():
if getattr(v, "autodetect", None):
SSH_MAPPER_BASE[k] = v
class SSHDetect(object):
"""
The SSHDetect class tries to automatically guess the device type running on the SSH remote end.
Parameters
----------
*args : list
The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
*kwargs : dict
The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
Attributes
----------
connection : netmiko.terminal_server.TerminalServer
A basic connection to the remote SSH end.
potential_matches: dict
Dict of (device type, accuracy) that is populated through interaction with the remote end.
Methods
-------
autodetect()
Try to determine the device type.
"""
def __init__(self, *args, **kwargs):
"""
Constructor of the SSHDetect class
"""
self.connection = ConnectHandler(*args, **kwargs)
if self.connection.device_type != "terminal_server":
self.connection.disconnect()
raise ValueError("The connection device_type must be of 'terminal_server'")
self.potential_matches = {}
def autodetect(self):
"""
Try to guess the best 'device_type' based on each device_type autodetect method.
Returns
-------
best_match : str or None
The device type that is currently the best to use to interact with the device
"""
for k, v in SSH_MAPPER_BASE.items():
try:
accuracy = v.autodetect(self.connection)
self.potential_matches[k] = accuracy
except:
pass
if not self.potential_matches:
self.connection.disconnect()
return None
best_match = sorted(self.potential_matches.items(), key=lambda t: t[1], reverse=True)  # highest accuracy first
self.connection.disconnect()
return best_match[0][0]
|
<commit_before><commit_msg>Add SSH auto detect feature<commit_after>"""
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
"""
from netmiko.ssh_dispatcher import CLASS_MAPPER_BASE, ConnectHandler
SSH_MAPPER_BASE = {}
for k, v in CLASS_MAPPER_BASE.iteritems():
if getattr(v, "autodetect", None):
SSH_MAPPER_BASE[k] = v
class SSHDetect(object):
"""
The SSHDetect class tries to automatically guess the device type running on the SSH remote end.
Parameters
----------
*args : list
The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
*kwargs : dict
The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
Attributes
----------
connection : netmiko.terminal_server.TerminalServer
A basic connection to the remote SSH end.
potential_matches: dict
Dict of (device type, accuracy) that is populated through interaction with the remote end.
Methods
-------
autodetect()
Try to determine the device type.
"""
def __init__(self, *args, **kwargs):
"""
Constructor of the SSHDetect class
"""
self.connection = ConnectHandler(*args, **kwargs)
if self.connection.device_type != "terminal_server":
self.connection.disconnect()
raise ValueError("The connection device_type must be of 'terminal_server'")
self.potential_matches = {}
def autodetect(self):
"""
Try to guess the best 'device_type' based on each device_type autodetect method.
Returns
-------
best_match : str or None
The device type that is currently the best to use to interact with the device
"""
for k, v in SSH_MAPPER_BASE.items():
try:
accuracy = v.autodetect(self.connection)
self.potential_matches[k] = accuracy
except:
pass
if not self.potential_matches:
self.connection.disconnect()
return None
best_match = sorted(self.potential_matches.items(), key=lambda t: t[1], reverse=True)  # highest accuracy first
self.connection.disconnect()
return best_match[0][0]
|
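Hypothetical usage of the class above (host and credentials are invented; the one hard requirement, per the constructor, is opening the probe connection as 'terminal_server'):

from netmiko.ssh_autodetect import SSHDetect
from netmiko.ssh_dispatcher import ConnectHandler

remote_device = {
    'device_type': 'terminal_server',  # mandatory for the probe connection
    'ip': '192.0.2.10',
    'username': 'admin',
    'password': 'secret',
}

guesser = SSHDetect(**remote_device)
best_match = guesser.autodetect()
if best_match is not None:
    # Reconnect with the guessed driver for normal operation.
    remote_device['device_type'] = best_match
    connection = ConnectHandler(**remote_device)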
|
0d32800fec1419eac39711fd8c94ce07896cddaf
|
sknn/tests/test_gaussian.py
|
sknn/tests/test_gaussian.py
|
import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
from . import test_linear
class TestGaussianOutput(test_linear.TestLinearNetwork):
def setUp(self):
self.nn = MLPR(layers=[("LinearGaussian",)])
|
Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output.
|
Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output.
|
Python
|
bsd-3-clause
|
gticket/scikit-neuralnetwork,IndraVikas/scikit-neuralnetwork,capitancambio/scikit-neuralnetwork,agomariz/scikit-neuralnetwork,freakynit/scikit-neuralnetwork,KhanSuleyman/scikit-neuralnetwork,aigamedev/scikit-neuralnetwork
|
Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output.
|
import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
from . import test_linear
class TestGaussianOutput(test_linear.TestLinearNetwork):
def setUp(self):
self.nn = MLPR(layers=[("LinearGaussian",)])
|
<commit_before><commit_msg>Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output.<commit_after>
|
import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
from . import test_linear
class TestGaussianOutput(test_linear.TestLinearNetwork):
def setUp(self):
self.nn = MLPR(layers=[("LinearGaussian",)])
|
Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output.import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
from . import test_linear
class TestGaussianOutput(test_linear.TestLinearNetwork):
def setUp(self):
self.nn = MLPR(layers=[("LinearGaussian",)])
|
<commit_before><commit_msg>Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output.<commit_after>import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
from . import test_linear
class TestGaussianOutput(test_linear.TestLinearNetwork):
def setUp(self):
self.nn = MLPR(layers=[("LinearGaussian",)])
|
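The trick above -- inheriting a full TestCase and overriding only setUp -- re-runs every inherited test against the new output layer. The pattern in isolation (a generic sketch, unrelated to sknn's actual classes):

import unittest

class BaseNetworkTests(unittest.TestCase):
    layer = 'Linear'

    def setUp(self):
        self.layers = [self.layer]

    def test_single_output_layer(self):
        # Runs once for the base class and once per subclass.
        self.assertEqual(len(self.layers), 1)

class GaussianNetworkTests(BaseNetworkTests):
    # Same assertions as the base class, new configuration.
    layer = 'LinearGaussian'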
|
74ffdab0c54f332b8787aea04582ee7312a34b4c
|
src/ggrc/migrations/versions/20161123124848_1f5c3e0025da_remove_control_id_column_from_.py
|
src/ggrc/migrations/versions/20161123124848_1f5c3e0025da_remove_control_id_column_from_.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove control_id column from assessments table
Create Date: 2016-11-23 12:48:48.942528
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1f5c3e0025da'
down_revision = '4afe69ce3c38'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
relationships = table(
"relationships",
column('source_id', sa.Integer),
column('source_type', sa.String),
column('destination_id', sa.Integer),
column('destination_type', sa.String),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime)
)
connection = op.get_bind()
rows_to_add = connection.execute(
"""
SELECT id, control_id from assessments
WHERE control_id IS NOT NULL
AND id NOT IN (
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.control_id
AND r.destination_id=a.id
WHERE r.source_type='Control'
AND r.destination_type='Assessment'
UNION
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.id
AND r.destination_id=a.control_id
WHERE r.destination_type='Control'
AND r.source_type='Assessment'
);
"""
)
now = datetime.now()
op.bulk_insert(relationships, [
{'source_id': assessment_id,
'source_type': 'Assessment',
'destination_id': control_id,
'destination_type': 'Control',
'created_at': now,
'updated_at': now} for (assessment_id, control_id) in rows_to_add]
)
op.drop_constraint(
"fk_control_control_assessment",
"assessments",
"foreignkey",
)
op.drop_column("assessments", "control_id")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.add_column(
"assessments",
sa.Column('control_id', sa.Integer, nullable=True)
)
op.create_foreign_key(
"fk_control_control_assessment", "assessments",
"controls", ["control_id"], ["id"]
)
|
Remove control_id column from assessments table
|
Remove control_id column from assessments table
|
Python
|
apache-2.0
|
selahssea/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core
|
Remove control_id column from assessments table
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove control_id column from assessments table
Create Date: 2016-11-23 12:48:48.942528
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1f5c3e0025da'
down_revision = '4afe69ce3c38'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
relationships = table(
"relationships",
column('source_id', sa.Integer),
column('source_type', sa.String),
column('destination_id', sa.Integer),
column('destination_type', sa.String),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime)
)
connection = op.get_bind()
rows_to_add = connection.execute(
"""
SELECT id, control_id from assessments
WHERE control_id IS NOT NULL
AND id NOT IN (
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.control_id
AND r.destination_id=a.id
WHERE r.source_type='Control'
AND r.destination_type='Assessment'
UNION
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.id
AND r.destination_id=a.control_id
WHERE r.destination_type='Control'
AND r.source_type='Assessment'
);
"""
)
now = datetime.now()
op.bulk_insert(relationships, [
{'source_id': assessment_id,
'source_type': 'Assessment',
'destination_id': control_id,
'destination_type': 'Control',
'created_at': now,
'updated_at': now} for (assessment_id, control_id) in rows_to_add]
)
op.drop_constraint(
"fk_control_control_assessment",
"assessments",
"foreignkey",
)
op.drop_column("assessments", "control_id")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.add_column(
"assessments",
sa.Column('control_id', sa.Integer, nullable=True)
)
op.create_foreign_key(
"fk_control_control_assessment", "assessments",
"controls", ["control_id"], ["id"]
)
|
<commit_before><commit_msg>Remove control_id column from assessments table<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove control_id column from assessments table
Create Date: 2016-11-23 12:48:48.942528
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1f5c3e0025da'
down_revision = '4afe69ce3c38'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
relationships = table(
"relationships",
column('source_id', sa.Integer),
column('source_type', sa.String),
column('destination_id', sa.Integer),
column('destination_type', sa.String),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime)
)
connection = op.get_bind()
rows_to_add = connection.execute(
"""
SELECT id, control_id from assessments
WHERE control_id IS NOT NULL
AND id NOT IN (
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.control_id
AND r.destination_id=a.id
WHERE r.source_type='Control'
AND r.destination_type='Assessment'
UNION
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.id
AND r.destination_id=a.control_id
WHERE r.destination_type='Control'
AND r.source_type='Assessment'
);
"""
)
now = datetime.now()
op.bulk_insert(relationships, [
{'source_id': assessment_id,
'source_type': 'Assessment',
'destination_id': control_id,
'destination_type': 'Control',
'created_at': now,
'updated_at': now} for (assessment_id, control_id) in rows_to_add]
)
op.drop_constraint(
"fk_control_control_assessment",
"assessments",
"foreignkey",
)
op.drop_column("assessments", "control_id")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.add_column(
"assessments",
sa.Column('control_id', sa.Integer, nullable=True)
)
op.create_foreign_key(
"fk_control_control_assessment", "assessments",
"controls", ["control_id"], ["id"]
)
|
Remove control_id column from assessments table
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove control_id column from assessments table
Create Date: 2016-11-23 12:48:48.942528
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1f5c3e0025da'
down_revision = '4afe69ce3c38'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
relationships = table(
"relationships",
column('source_id', sa.Integer),
column('source_type', sa.String),
column('destination_id', sa.Integer),
column('destination_type', sa.String),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime)
)
connection = op.get_bind()
rows_to_add = connection.execute(
"""
SELECT id, control_id from assessments
WHERE control_id IS NOT NULL
AND id NOT IN (
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.control_id
AND r.destination_id=a.id
WHERE r.source_type='Control'
AND r.destination_type='Assessment'
UNION
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.id
AND r.destination_id=a.control_id
WHERE r.destination_type='Control'
AND r.source_type='Assessment'
);
"""
)
now = datetime.now()
op.bulk_insert(relationships, [
{'source_id': assessment_id,
'source_type': 'Assessment',
'destination_id': control_id,
'destination_type': 'Control',
'created_at': now,
'updated_at': now} for (assessment_id, control_id) in rows_to_add]
)
op.drop_constraint(
"fk_control_control_assessment",
"assessments",
"foreignkey",
)
op.drop_column("assessments", "control_id")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.add_column(
"assessments",
sa.Column('control_id', sa.Integer, nullable=True)
)
op.create_foreign_key(
"fk_control_control_assessment", "assessments",
"controls", ["control_id"], ["id"]
)
|
<commit_before><commit_msg>Remove control_id column from assessments table<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove control_id column from assessments table
Create Date: 2016-11-23 12:48:48.942528
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1f5c3e0025da'
down_revision = '4afe69ce3c38'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
relationships = table(
"relationships",
column('source_id', sa.Integer),
column('source_type', sa.String),
column('destination_id', sa.Integer),
column('destination_type', sa.String),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime)
)
connection = op.get_bind()
rows_to_add = connection.execute(
"""
SELECT id, control_id from assessments
WHERE control_id IS NOT NULL
AND id NOT IN (
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.control_id
AND r.destination_id=a.id
WHERE r.source_type='Control'
AND r.destination_type='Assessment'
UNION
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.id
AND r.destination_id=a.control_id
WHERE r.destination_type='Control'
AND r.source_type='Assessment'
);
"""
)
now = datetime.now()
op.bulk_insert(relationships, [
{'source_id': assessment_id,
'source_type': 'Assessment',
'destination_id': control_id,
'destination_type': 'Control',
'created_at': now,
'updated_at': now} for (assessment_id, control_id) in rows_to_add]
)
op.drop_constraint(
"fk_control_control_assessment",
"assessments",
"foreignkey",
)
op.drop_column("assessments", "control_id")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.add_column(
"assessments",
sa.Column('control_id', sa.Integer, nullable=True)
)
op.create_foreign_key(
"fk_control_control_assessment", "assessments",
"controls", ["control_id"], ["id"]
)
|
|
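The upgrade above follows a backfill-then-drop pattern: it first materializes any control_id link not yet mirrored by a relationships row (in either direction) and only then drops the column, so no link is lost on upgrade. A stripped-down sketch of the same pattern over a hypothetical items/links schema (not GGRC's real tables):

import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import table, column

def upgrade():
    links = table(
        "links",
        column("item_id", sa.Integer),
        column("tag_id", sa.Integer),
    )
    conn = op.get_bind()
    # Backfill: turn each remaining column-based link into a row-based one.
    rows = conn.execute("SELECT id, tag_id FROM items WHERE tag_id IS NOT NULL")
    op.bulk_insert(links, [{"item_id": i, "tag_id": t} for i, t in rows])
    # Only once the data is safe, drop the old column.
    op.drop_column("items", "tag_id")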
372bd768acae6fbf425271b193d1734e5001c71a
|
4/Solution.py
|
4/Solution.py
|
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2)
merged = sorted(nums1)
length = len(merged)
if length % 2 != 0:
return merged[int(length/2)]
else:
return (merged[int(length/2) - 1] + merged[int(length/2)])/2
if __name__ == '__main__':
nums1 = [1, 2, 3]
nums2 = [3, 4, 5]
print(Solution().findMedianSortedArrays(nums1, nums2))
|
Add initial working solution 4
|
Add initial working solution 4
|
Python
|
mit
|
xliiauo/leetcode,xliiauo/leetcode,xiao0720/leetcode,xliiauo/leetcode,xiao0720/leetcode
|
Add initial working solution 4
|
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2)
merged = sorted(nums1)
length = len(merged)
if length % 2 != 0:
return merged[int(length/2)]
else:
return (merged[int(length/2) - 1] + merged[int(length/2)])/2
if __name__ == '__main__':
nums1 = [1, 2, 3]
nums2 = [3, 4, 5]
print(Solution().findMedianSortedArrays(nums1, nums2))
|
<commit_before><commit_msg>Add initial working solution 4<commit_after>
|
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2)
merged = sorted(nums1)
length = len(merged)
if length % 2 != 0:
return merged[int(length/2)]
else:
return (merged[int(length/2) - 1] + merged[int(length/2)])/2
if __name__ == '__main__':
nums1 = [1, 2, 3]
nums2 = [3, 4, 5]
print(Solution().findMedianSortedArrays(nums1, nums2))
|
Add initial working solution 4
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2)
merged = sorted(nums1)
length = len(merged)
if length % 2 != 0:
return merged[int(length/2)]
else:
return (merged[int(length/2) - 1] + merged[int(length/2)])/2
if __name__ == '__main__':
nums1 = [1, 2, 3]
nums2 = [3, 4, 5]
print(Solution().findMedianSortedArrays(nums1, nums2))
|
<commit_before><commit_msg>Add initial working solution 4<commit_after>class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2)
merged = sorted(nums1)
length = len(merged)
if length % 2 != 0:
return merged[int(length/2)]
else:
return (merged[int(length/2) - 1] + merged[int(length/2)])/2
if __name__ == '__main__':
nums1 = [1, 2, 3]
nums2 = [3, 4, 5]
print(Solution().findMedianSortedArrays(nums1, nums2))
|
|
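The accepted solution above sorts the concatenated list, which costs O((m+n) log(m+n)) even though both inputs are already sorted. A linear-time merge is enough; a small alternative sketch using the standard library (the O(log(m+n)) binary-search partition the problem statement asks for is more involved and omitted here):

import heapq

def find_median_sorted(nums1, nums2):
    merged = list(heapq.merge(nums1, nums2))  # O(m+n) merge of two sorted inputs
    n = len(merged)
    mid = n // 2
    if n % 2:
        return float(merged[mid])
    return (merged[mid - 1] + merged[mid]) / 2.0

assert find_median_sorted([1, 2, 3], [3, 4, 5]) == 3.0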
b66ad576230fb7c96a8f5c6c7b6af8a8e4c8d0b5
|
vmf/games/source.py
|
vmf/games/source.py
|
"""
Helper classes for creating maps in any Source Engine game.
"""
from vmf.vmf import Entity
from vmf.types import Origin
class LogicAuto(Entity):
"""Sets up certain game logic. Fires some useful map events.
https://developer.valvesoftware.com/wiki/Logic_auto
"""
def __init__(self):
Entity.__init__(self, "logic_auto")
self.origin = Origin()
|
Add module for Source Engine game logic entities
|
Add module for Source Engine game logic entities
|
Python
|
bsd-2-clause
|
BHSPitMonkey/vmflib
|
Add module for Source Engine game logic entities
|
"""
Helper classes for creating maps in any Source Engine game.
"""
from vmf.vmf import Entity
from vmf.types import Origin
class LogicAuto(Entity):
"""Sets up certain game logic. Fires some useful map events.
https://developer.valvesoftware.com/wiki/Logic_auto
"""
def __init__(self):
Entity.__init__(self, "logic_auto")
self.origin = Origin()
|
<commit_before><commit_msg>Add module for Source Engine game logic entities<commit_after>
|
"""
Helper classes for creating maps in any Source Engine game.
"""
from vmf.vmf import Entity
from vmf.types import Origin
class LogicAuto(Entity):
"""Sets up certain game logic. Fires some useful map events.
https://developer.valvesoftware.com/wiki/Logic_auto
"""
def __init__(self):
Entity.__init__(self, "logic_auto")
self.origin = Origin()
|
Add module for Source Engine game logic entities
"""
Helper classes for creating maps in any Source Engine game.
"""
from vmf.vmf import Entity
from vmf.types import Origin
class LogicAuto(Entity):
"""Sets up certain game logic. Fires some useful map events.
https://developer.valvesoftware.com/wiki/Logic_auto
"""
def __init__(self):
Entity.__init__(self, "logic_auto")
self.origin = Origin()
|
<commit_before><commit_msg>Add module for Source Engine game logic entities<commit_after>"""
Helper classes for creating maps in any Source Engine game.
"""
from vmf.vmf import Entity
from vmf.types import Origin
class LogicAuto(Entity):
"""Sets up certain game logic. Fires some useful map events.
https://developer.valvesoftware.com/wiki/Logic_auto
"""
def __init__(self):
Entity.__init__(self, "logic_auto")
self.origin = Origin()
|
|
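For context, using the new wrapper is a one-liner; a minimal, hedged usage sketch (attaching the entity to a map object is outside this commit, so that wiring is omitted):

from vmf.games.source import LogicAuto

auto = LogicAuto()   # Entity classname is set to "logic_auto"
print(auto.origin)   # placed at the default Origin()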
a9388f7d4c4747e6710d20d618b57f19360cb69c
|
tests/adapters/compliance_tests/remove_vlan_test.py
|
tests/adapters/compliance_tests/remove_vlan_test.py
|
# Copyright 2019 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from netman.core.objects.exceptions import UnknownVlan
from tests.adapters.compliance_test_case import ComplianceTestCase
class RemoveVlanTest(ComplianceTestCase):
_dev_sample = "brocade"
def setUp(self):
super(RemoveVlanTest, self).setUp()
self.client.add_vlan(1000)
def tearDown(self):
self.janitor.remove_vlan(1000)
super(RemoveVlanTest, self).tearDown()
def test_removes_vlan_from_get_vlan(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.get_vlan(1000)
def test_removes_vlan_raise_when_vlan_is_already_removed(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.remove_vlan(1000)
def test_removes_vlan_is_removed_from_list(self):
vlan_count = len(self.client.get_vlans())
self.client.remove_vlan(1000)
assert_that(len(self.client.get_vlans()), equal_to(vlan_count - 1))
|
Add remove vlan compliance test
|
Add remove vlan compliance test
|
Python
|
apache-2.0
|
lindycoder/netman,internaphosting/netman,internap/netman
|
Add remove vlan compliance test
|
# Copyright 2019 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from netman.core.objects.exceptions import UnknownVlan
from tests.adapters.compliance_test_case import ComplianceTestCase
class RemoveVlanTest(ComplianceTestCase):
_dev_sample = "brocade"
def setUp(self):
super(RemoveVlanTest, self).setUp()
self.client.add_vlan(1000)
def tearDown(self):
self.janitor.remove_vlan(1000)
super(RemoveVlanTest, self).tearDown()
def test_removes_vlan_from_get_vlan(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.get_vlan(1000)
def test_removes_vlan_raise_when_vlan_is_already_removed(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.remove_vlan(1000)
def test_removes_vlan_is_removed_from_list(self):
vlan_count = len(self.client.get_vlans())
self.client.remove_vlan(1000)
assert_that(len(self.client.get_vlans()), equal_to(vlan_count - 1))
|
<commit_before><commit_msg>Add remove vlan compliance test<commit_after>
|
# Copyright 2019 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from netman.core.objects.exceptions import UnknownVlan
from tests.adapters.compliance_test_case import ComplianceTestCase
class RemoveVlanTest(ComplianceTestCase):
_dev_sample = "brocade"
def setUp(self):
super(RemoveVlanTest, self).setUp()
self.client.add_vlan(1000)
def tearDown(self):
self.janitor.remove_vlan(1000)
super(RemoveVlanTest, self).tearDown()
def test_removes_vlan_from_get_vlan(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.get_vlan(1000)
def test_removes_vlan_raise_when_vlan_is_already_removed(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.remove_vlan(1000)
def test_removes_vlan_is_removed_from_list(self):
vlan_count = len(self.client.get_vlans())
self.client.remove_vlan(1000)
assert_that(len(self.client.get_vlans()), equal_to(vlan_count - 1))
|
Add remove vlan compliance test
# Copyright 2019 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from netman.core.objects.exceptions import UnknownVlan
from tests.adapters.compliance_test_case import ComplianceTestCase
class RemoveVlanTest(ComplianceTestCase):
_dev_sample = "brocade"
def setUp(self):
super(RemoveVlanTest, self).setUp()
self.client.add_vlan(1000)
def tearDown(self):
self.janitor.remove_vlan(1000)
super(RemoveVlanTest, self).tearDown()
def test_removes_vlan_from_get_vlan(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.get_vlan(1000)
def test_removes_vlan_raise_when_vlan_is_already_removed(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.remove_vlan(1000)
def test_removes_vlan_is_removed_from_list(self):
vlan_count = len(self.client.get_vlans())
self.client.remove_vlan(1000)
assert_that(len(self.client.get_vlans()), equal_to(vlan_count - 1))
|
<commit_before><commit_msg>Add remove vlan compliance test<commit_after># Copyright 2019 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hamcrest import assert_that, equal_to
from netman.core.objects.exceptions import UnknownVlan
from tests.adapters.compliance_test_case import ComplianceTestCase
class RemoveVlanTest(ComplianceTestCase):
_dev_sample = "brocade"
def setUp(self):
super(RemoveVlanTest, self).setUp()
self.client.add_vlan(1000)
def tearDown(self):
self.janitor.remove_vlan(1000)
super(RemoveVlanTest, self).tearDown()
def test_removes_vlan_from_get_vlan(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.get_vlan(1000)
def test_removes_vlan_raise_when_vlan_is_already_removed(self):
self.client.remove_vlan(1000)
with self.assertRaises(UnknownVlan):
self.client.remove_vlan(1000)
def test_removes_vlan_is_removed_from_list(self):
vlan_count = len(self.client.get_vlans())
self.client.remove_vlan(1000)
assert_that(len(self.client.get_vlans()), equal_to(vlan_count - 1))
|
|
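Worth noting in the record above: cleanup runs through a separate janitor client in tearDown, so a vlan left behind by a failing assertion is still removed. The same guarantee can be written inline with try/finally; a sketch (the client/janitor API is inferred from the test itself, not from netman's documentation):

def exercise_vlan(client, janitor, number=1000):
    client.add_vlan(number)
    try:
        assert any(v.number == number for v in client.get_vlans())  # attribute name assumed
    finally:
        janitor.remove_vlan(number)  # runs even when the assertion fails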
9ffe40aaf5ece521020258c4b31fbdb514e02b69
|
manager/utilities.py
|
manager/utilities.py
|
from typing import Optional
from manager.models import Package, Build
def get_latest_build(package: Package) -> Optional[Build]:
try:
return Build.objects.filter(package=package, status=Build.SUCCESS).order_by('-id')[0]
except IndexError:
return None
|
Add utility function to get the latest Build
|
Add utility function to get the latest Build
|
Python
|
mit
|
colajam93/aurpackager,colajam93/aurpackager,colajam93/aurpackager,colajam93/aurpackager
|
Add utility function to get the latest Build
|
from typing import Optional
from manager.models import Package, Build
def get_latest_build(package: Package) -> Optional[Build]:
try:
return Build.objects.filter(package=package, status=Build.SUCCESS).order_by('-id')[0]
except IndexError:
return None
|
<commit_before><commit_msg>Add utility function to get the latest Build<commit_after>
|
from typing import Optional
from manager.models import Package, Build
def get_latest_build(package: Package) -> Optional[Build]:
try:
return Build.objects.filter(package=package, status=Build.SUCCESS).order_by('-id')[0]
except IndexError:
return None
|
Add utility function to get the latest Build
from typing import Optional
from manager.models import Package, Build
def get_latest_build(package: Package) -> Optional[Build]:
try:
return Build.objects.filter(package=package, status=Build.SUCCESS).order_by('-id')[0]
except IndexError:
return None
|
<commit_before><commit_msg>Add utility function to get the latest Build<commit_after>from typing import Optional
from manager.models import Package, Build
def get_latest_build(package: Package) -> Optional[Build]:
try:
return Build.objects.filter(package=package, status=Build.SUCCESS).order_by('-id')[0]
except IndexError:
return None
|
|
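Since Django 1.6, the try/IndexError pattern above has a built-in equivalent: QuerySet.first() returns None on an empty result. A behaviorally identical sketch:

from typing import Optional

from manager.models import Package, Build

def get_latest_build(package: Package) -> Optional[Build]:
    return (Build.objects
            .filter(package=package, status=Build.SUCCESS)
            .order_by('-id')
            .first())  # None when no successful build exists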
0b55d97573fcd196a318b3c901f6dcac1b0a4eef
|
chrome/test/functional/test_basic.py
|
chrome/test/functional/test_basic.py
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pyauto import PyUITest
class SimpleTest(PyUITest):
def testCanOpenGoogle(self):
self.NavigateToURL("http://www.google.com")
if __name__ == '__main__':
unittest.main()
|
Create a placeholder for pyauto test scripts.
|
Create a placeholder for pyauto test scripts.
Including a hello world script.
Review URL: http://codereview.chromium.org/668004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@40579 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
ropik/chromium,adobe/chromium,ropik/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,ropik/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,ropik/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,ropik/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,adobe/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,yitian134/chromium
|
Create a placeholder for pyauto test scripts.
Including a hello world script.
Review URL: http://codereview.chromium.org/668004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@40579 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pyauto import PyUITest
class SimpleTest(PyUITest):
def testCanOpenGoogle(self):
self.NavigateToURL("http://www.google.com")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create a placeholder for pyauto test scripts.
Including a hello world script.
Review URL: http://codereview.chromium.org/668004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@40579 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pyauto import PyUITest
class SimpleTest(PyUITest):
def testCanOpenGoogle(self):
self.NavigateToURL("http://www.google.com")
if __name__ == '__main__':
unittest.main()
|
Create a placeholder for pyauto test scripts.
Including a hello world script.
Review URL: http://codereview.chromium.org/668004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@40579 0039d316-1c4b-4281-b951-d872f2087c98
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pyauto import PyUITest
class SimpleTest(PyUITest):
def testCanOpenGoogle(self):
self.NavigateToURL("http://www.google.com")
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create a placeholder for pyauto test scripts.
Including a hello world script.
Review URL: http://codereview.chromium.org/668004
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@40579 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from pyauto import PyUITest
class SimpleTest(PyUITest):
def testCanOpenGoogle(self):
self.NavigateToURL("http://www.google.com")
if __name__ == '__main__':
unittest.main()
|
|
6906f574fad033f0b68fe7a5a35f4fcef1207ee0
|
Get_Webpage_Source.py
|
Get_Webpage_Source.py
|
import sublime
import sublime_plugin
import urllib.request
class GetWebpageSourceCommand(sublime_plugin.TextCommand):
def run(self, edit):
url = self.view.substr(self.view.sel()[0])
if len(url) == 0:
return
output = ""
r = urllib.request.urlopen(url)
output = str(r.read(), encoding='utf8')
newView = sublime.active_window().new_file()
newView.insert(edit, 0, output)
|
Load the source from a highlighted URL into a new window
|
Load the source from a highlighted URL into a new window
|
Python
|
mit
|
RichardHyde/SublimeText.Packages
|
Load the source from a highlighted URL into a new window
|
import sublime
import sublime_plugin
import urllib.request
class GetWebpageSourceCommand(sublime_plugin.TextCommand):
def run(self, edit):
url = self.view.substr(self.view.sel()[0])
if len(url) == 0:
return
output = ""
r = urllib.request.urlopen(url)
output = str(r.read(), encoding='utf8')
newView = sublime.active_window().new_file()
newView.insert(edit, 0, output)
|
<commit_before><commit_msg>Load the source from a highlighted URL into a new window<commit_after>
|
import sublime
import sublime_plugin
import urllib.request
class GetWebpageSourceCommand(sublime_plugin.TextCommand):
def run(self, edit):
url = self.view.substr(self.view.sel()[0])
if len(url) == 0:
return
output = ""
r = urllib.request.urlopen(url)
output = str(r.read(), encoding='utf8')
newView = sublime.active_window().new_file()
newView.insert(edit, 0, output)
|
Load the source from a highlighted URL into a new window
import sublime
import sublime_plugin
import urllib.request
class GetWebpageSourceCommand(sublime_plugin.TextCommand):
def run(self, edit):
url = self.view.substr(self.view.sel()[0])
if len(url) == 0:
return
output = ""
r = urllib.request.urlopen(url)
output = str(r.read(), encoding='utf8')
newView = sublime.active_window().new_file()
newView.insert(edit, 0, output)
|
<commit_before><commit_msg>Load the source from a highlighted URL into a new window<commit_after>import sublime
import sublime_plugin
import urllib.request
class GetWebpageSourceCommand(sublime_plugin.TextCommand):
def run(self, edit):
url = self.view.substr(self.view.sel()[0])
if len(url) == 0:
return
output = ""
r = urllib.request.urlopen(url)
output = str(r.read(), encoding='utf8')
newView = sublime.active_window().new_file()
newView.insert(edit, 0, output)
|
|
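The happy-path fetch above will raise inside Sublime on an unreachable URL or a non-UTF-8 body. A slightly hardened helper, sketched with the same calls (the Sublime view plumbing is unchanged and omitted):

import urllib.error
import urllib.request

def fetch_source(url, timeout=10):
    try:
        with urllib.request.urlopen(url, timeout=timeout) as response:
            return str(response.read(), encoding='utf8')
    except (urllib.error.URLError, UnicodeDecodeError) as exc:
        return "Could not fetch %s: %s" % (url, exc)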
93d80604003e1b3b21498df01f7647e7cea69a5f
|
cybox/test/objects/win_event_test.py
|
cybox/test/objects/win_event_test.py
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_event_object import WinEvent
from cybox.test.objects import ObjectTestCase
class TestWinEvent(ObjectTestCase, unittest.TestCase):
object_type = "WindowsEventObjectType"
klass = WinEvent
_full_dict = {
'name': u"Object Open",
'handle': {
'name': u"Event Handle",
'type': u"Event",
'xsi:type': "WindowsHandleObjectType",
},
'type': u"Success",
'xsi:type': "WindowsEventObjectType",
}
if __name__ == "__main__":
unittest.main()
|
Add basic WinEvent object tests.
|
Add basic WinEvent object tests.
|
Python
|
bsd-3-clause
|
CybOXProject/python-cybox
|
Add basic WinEvent object tests.
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_event_object import WinEvent
from cybox.test.objects import ObjectTestCase
class TestWinEvent(ObjectTestCase, unittest.TestCase):
object_type = "WindowsEventObjectType"
klass = WinEvent
_full_dict = {
'name': u"Object Open",
'handle': {
'name': u"Event Handle",
'type': u"Event",
'xsi:type': "WindowsHandleObjectType",
},
'type': u"Success",
'xsi:type': "WindowsEventObjectType",
}
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add basic WinEvent object tests.<commit_after>
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_event_object import WinEvent
from cybox.test.objects import ObjectTestCase
class TestWinEvent(ObjectTestCase, unittest.TestCase):
object_type = "WindowsEventObjectType"
klass = WinEvent
_full_dict = {
'name': u"Object Open",
'handle': {
'name': u"Event Handle",
'type': u"Event",
'xsi:type': "WindowsHandleObjectType",
},
'type': u"Success",
'xsi:type': "WindowsEventObjectType",
}
if __name__ == "__main__":
unittest.main()
|
Add basic WinEvent object tests.
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_event_object import WinEvent
from cybox.test.objects import ObjectTestCase
class TestWinEvent(ObjectTestCase, unittest.TestCase):
object_type = "WindowsEventObjectType"
klass = WinEvent
_full_dict = {
'name': u"Object Open",
'handle': {
'name': u"Event Handle",
'type': u"Event",
'xsi:type': "WindowsHandleObjectType",
},
'type': u"Success",
'xsi:type': "WindowsEventObjectType",
}
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add basic WinEvent object tests.<commit_after># Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.win_event_object import WinEvent
from cybox.test.objects import ObjectTestCase
class TestWinEvent(ObjectTestCase, unittest.TestCase):
object_type = "WindowsEventObjectType"
klass = WinEvent
_full_dict = {
'name': u"Object Open",
'handle': {
'name': u"Event Handle",
'type': u"Event",
'xsi:type': "WindowsHandleObjectType",
},
'type': u"Success",
'xsi:type': "WindowsEventObjectType",
}
if __name__ == "__main__":
unittest.main()
|
|
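The _full_dict-driven test above leans on the shared ObjectTestCase harness, which round-trips the dictionary through the object model (and, in cybox's suite, may exercise the XML bindings as well). The essence of such a round-trip check, sketched:

def assert_dict_roundtrip(klass, full_dict):
    obj = klass.from_dict(full_dict)    # dict -> API object
    assert obj.to_dict() == full_dict   # API object -> dict, unchanged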
89d4c1420805a6f2e491f1ab250722cdcf950bd8
|
ndtable/engine/mv.py
|
ndtable/engine/mv.py
|
import sys
import time
import numpy as np
from minivect import miniast
from minivect import specializers
from minivect import minitypes
from minivect.ctypes_conversion import get_data_pointer, \
get_pointer, convert_to_ctypes
from ndtable.datashape.coretypes import var_generator
from ndtable.expr.visitor import MroTransformer
context_debug = 0
class LazyLLVMContext(miniast.LLVMContext):
debug = context_debug
def stridesvar(self, variable):
return miniast.StridePointer(self.pos, minitypes.NPyIntp.pointer(),
variable)
#------------------------------------------------------------------------
# Blaze Pipeline
#------------------------------------------------------------------------
def setup(debug=0):
context = LazyLLVMContext()
builder = context.astbuilder
ccontext = miniast.CContext()
ccontext.debug = debug
return context, ccontext, builder
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def get_blaze_pointer(numpy_array, array_type):
dtype_pointer = array_type.dtype.pointer()
return numpy_array.ctypes.data_as(convert_to_ctypes(dtype_pointer))
def specialize(specializer_cls, ast, context):
specializers = [specializer_cls]
result = iter(context.run(ast, specializers)).next()
_, specialized_ast, _, code_output = result
return specialized_ast, code_output
#------------------------------------------------------------------------
# Mapper
#------------------------------------------------------------------------
class Minivect(MroTransformer):
def __init__(self, context):
self.builder = context.astbuilder
self.variables = []
def ArrayNode(self, node):
return node
def App(self, node):
lhs, rhs = self.visit(node.children)[0]
op = node.operator.op
return self.builder.binop(lhs.type, op, lhs, rhs)
def BinaryOp(self, node):
lhs, rhs = map(self.visit, node.children)
if isinstance(lhs, list):
lhs = self.visit(lhs)
if isinstance(rhs, list):
rhs = self.visit(rhs)
return lhs, rhs
def Literal(self, node):
minidtype = node.datashape.to_minitype()
variable = self.builder.variable(minidtype, str(id(node)))
variable.value = node.val
self.variables.append(variable)
return variable
|
Work towards integrating NDTable <-> Minivect
|
Work towards integrating NDTable <-> Minivect
|
Python
|
bsd-2-clause
|
seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core,seibert/blaze-core
|
Work towards integrating NDTable <-> Minivect
|
import sys
import time
import numpy as np
from minivect import miniast
from minivect import specializers
from minivect import minitypes
from minivect.ctypes_conversion import get_data_pointer, \
get_pointer, convert_to_ctypes
from ndtable.datashape.coretypes import var_generator
from ndtable.expr.visitor import MroTransformer
context_debug = 0
class LazyLLVMContext(miniast.LLVMContext):
debug = context_debug
def stridesvar(self, variable):
return miniast.StridePointer(self.pos, minitypes.NPyIntp.pointer(),
variable)
#------------------------------------------------------------------------
# Blaze Pipeline
#------------------------------------------------------------------------
def setup(debug=0):
context = LazyLLVMContext()
builder = context.astbuilder
ccontext = miniast.CContext()
ccontext.debug = debug
return context, ccontext, builder
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def get_blaze_pointer(numpy_array, array_type):
dtype_pointer = array_type.dtype.pointer()
return numpy_array.ctypes.data_as(convert_to_ctypes(dtype_pointer))
def specialize(specializer_cls, ast, context):
specializers = [specializer_cls]
result = iter(context.run(ast, specializers)).next()
_, specialized_ast, _, code_output = result
return specialized_ast, code_output
#------------------------------------------------------------------------
# Mapper
#------------------------------------------------------------------------
class Minivect(MroTransformer):
def __init__(self, context):
self.builder = context.astbuilder
self.variables = []
def ArrayNode(self, node):
return node
def App(self, node):
lhs, rhs = self.visit(node.children)[0]
op = node.operator.op
return self.builder.binop(lhs.type, op, lhs, rhs)
def BinaryOp(self, node):
lhs, rhs = map(self.visit, node.children)
if isinstance(lhs, list):
lhs = self.visit(lhs)
if isinstance(rhs, list):
rhs = self.visit(rhs)
return lhs, rhs
def Literal(self, node):
minidtype = node.datashape.to_minitype()
variable = self.builder.variable(minidtype, str(id(node)))
variable.value = node.val
self.variables.append(variable)
return variable
|
<commit_before><commit_msg>Work towards integrating NDTable <-> Minivect<commit_after>
|
import sys
import time
import numpy as np
from minivect import miniast
from minivect import specializers
from minivect import minitypes
from minivect.ctypes_conversion import get_data_pointer, \
get_pointer, convert_to_ctypes
from ndtable.datashape.coretypes import var_generator
from ndtable.expr.visitor import MroTransformer
context_debug = 0
class LazyLLVMContext(miniast.LLVMContext):
debug = context_debug
def stridesvar(self, variable):
return miniast.StridePointer(self.pos, minitypes.NPyIntp.pointer(),
variable)
#------------------------------------------------------------------------
# Blaze Pipeline
#------------------------------------------------------------------------
def setup(debug=0):
context = LazyLLVMContext()
builder = context.astbuilder
ccontext = miniast.CContext()
ccontext.debug = debug
return context, ccontext, builder
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def get_blaze_pointer(numpy_array, array_type):
dtype_pointer = array_type.dtype.pointer()
return numpy_array.ctypes.data_as(convert_to_ctypes(dtype_pointer))
def specialize(specializer_cls, ast, context):
specializers = [specializer_cls]
result = iter(context.run(ast, specializers)).next()
_, specialized_ast, _, code_output = result
return specialized_ast, code_output
#------------------------------------------------------------------------
# Mapper
#------------------------------------------------------------------------
class Minivect(MroTransformer):
def __init__(self, context):
self.builder = context.astbuilder
self.variables = []
def ArrayNode(self, node):
return node
def App(self, node):
lhs, rhs = self.visit(node.children)[0]
op = node.operator.op
return self.builder.binop(lhs.type, op, lhs, rhs)
def BinaryOp(self, node):
lhs, rhs = map(self.visit, node.children)
if isinstance(lhs, list):
lhs = self.visit(lhs)
if isinstance(rhs, list):
rhs = self.visit(rhs)
return lhs, rhs
def Literal(self, node):
minidtype = node.datashape.to_minitype()
variable = self.builder.variable(minidtype, str(id(node)))
variable.value = node.val
self.variables.append(variable)
return variable
|
Work towards integrating NDTable <-> Minivect
import sys
import time
import numpy as np
from minivect import miniast
from minivect import specializers
from minivect import minitypes
from minivect.ctypes_conversion import get_data_pointer, \
get_pointer, convert_to_ctypes
from ndtable.datashape.coretypes import var_generator
from ndtable.expr.visitor import MroTransformer
context_debug = 0
class LazyLLVMContext(miniast.LLVMContext):
debug = context_debug
def stridesvar(self, variable):
return miniast.StridePointer(self.pos, minitypes.NPyIntp.pointer(),
variable)
#------------------------------------------------------------------------
# Blaze Pipeline
#------------------------------------------------------------------------
def setup(debug=0):
context = LazyLLVMContext()
builder = context.astbuilder
ccontext = miniast.CContext()
ccontext.debug = debug
return context, ccontext, builder
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def get_blaze_pointer(numpy_array, array_type):
dtype_pointer = array_type.dtype.pointer()
return numpy_array.ctypes.data_as(convert_to_ctypes(dtype_pointer))
def specialize(specializer_cls, ast, context):
specializers = [specializer_cls]
result = iter(context.run(ast, specializers)).next()
_, specialized_ast, _, code_output = result
return specialized_ast, code_output
#------------------------------------------------------------------------
# Mapper
#------------------------------------------------------------------------
class Minivect(MroTransformer):
def __init__(self, context):
self.builder = context.astbuilder
self.variables = []
def ArrayNode(self, node):
return node
def App(self, node):
lhs, rhs = self.visit(node.children)[0]
op = node.operator.op
return self.builder.binop(lhs.type, op, lhs, rhs)
def BinaryOp(self, node):
lhs, rhs = map(self.visit, node.children)
if isinstance(lhs, list):
lhs = self.visit(lhs)
if isinstance(rhs, list):
rhs = self.visit(rhs)
return lhs, rhs
def Literal(self, node):
minidtype = node.datashape.to_minitype()
variable = self.builder.variable(minidtype, str(id(node)))
variable.value = node.val
self.variables.append(variable)
return variable
|
<commit_before><commit_msg>Work towards integrating NDTable <-> Minivect<commit_after>import sys
import time
import numpy as np
from minivect import miniast
from minivect import specializers
from minivect import minitypes
from minivect.ctypes_conversion import get_data_pointer, \
get_pointer, convert_to_ctypes
from ndtable.datashape.coretypes import var_generator
from ndtable.expr.visitor import MroTransformer
context_debug = 0
class LazyLLVMContext(miniast.LLVMContext):
debug = context_debug
def stridesvar(self, variable):
return miniast.StridePointer(self.pos, minitypes.NPyIntp.pointer(),
variable)
#------------------------------------------------------------------------
# Blaze Pipeline
#------------------------------------------------------------------------
def setup(debug=0):
context = LazyLLVMContext()
builder = context.astbuilder
ccontext = miniast.CContext()
ccontext.debug = debug
return context, ccontext, builder
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def get_blaze_pointer(numpy_array, array_type):
dtype_pointer = array_type.dtype.pointer()
return numpy_array.ctypes.data_as(convert_to_ctypes(dtype_pointer))
def specialize(specializer_cls, ast, context):
specializers = [specializer_cls]
result = iter(context.run(ast, specializers)).next()
_, specialized_ast, _, code_output = result
return specialized_ast, code_output
#------------------------------------------------------------------------
# Mapper
#------------------------------------------------------------------------
class Minivect(MroTransformer):
def __init__(self, context):
self.builder = context.astbuilder
self.variables = []
def ArrayNode(self, node):
return node
def App(self, node):
lhs, rhs = self.visit(node.children)[0]
op = node.operator.op
return self.builder.binop(lhs.type, op, lhs, rhs)
def BinaryOp(self, node):
lhs, rhs = map(self.visit, node.children)
if isinstance(lhs, list):
lhs = self.visit(lhs)
if isinstance(rhs, list):
rhs = self.visit(rhs)
return lhs, rhs
def Literal(self, node):
minidtype = node.datashape.to_minitype()
variable = self.builder.variable(minidtype, str(id(node)))
variable.value = node.val
self.variables.append(variable)
return variable
|
|
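The transformer above dispatches on node class names (App, BinaryOp, Literal, ...) via MroTransformer. Stripped of the minivect specifics, the dispatch mechanism reduces to this classic visitor idiom (a simplified sketch, not the actual MroTransformer):

class NameDispatchVisitor(object):
    def visit(self, node):
        # Look up a handler named after the node's class, so visiting a
        # Literal calls self.Literal(node); fall back to generic_visit.
        handler = getattr(self, type(node).__name__, self.generic_visit)
        return handler(node)

    def generic_visit(self, node):
        raise NotImplementedError("no handler for %s" % type(node).__name__)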
1146ab654c8b0d6f982f19bafed91f18edb877f3
|
tests/rules/test_dirty_unzip.py
|
tests/rules/test_dirty_unzip.py
|
import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def zip_error(tmpdir):
path = os.path.join(str(tmpdir), 'foo.zip')
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
        # Reopen for reading: the write-mode handle is closed once the with-block exits.
        with zipfile.ZipFile(path) as archive:
            archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {'foo.zip', 'a', 'b', 'c'})
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_match(zip_error, script):
assert match(Command(script=script), None)
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_side_effect(zip_error, script):
side_effect(Command(script=script), None)
assert(os.listdir('.') == ['foo.zip'])
@pytest.mark.parametrize('script,fixed', [
('unzip foo', 'unzip foo -d foo'),
('unzip foo.zip', 'unzip foo.zip -d foo')])
def test_get_new_command(zip_error, script, fixed):
assert get_new_command(Command(script=script), None) == fixed
|
Add tests for the `dirty_unzip` rule
|
Add tests for the `dirty_unzip` rule
|
Python
|
mit
|
gogobebe2/thefuck,bigplus/thefuck,Aeron/thefuck,manashmndl/thefuck,subajat1/thefuck,PLNech/thefuck,princeofdarkness76/thefuck,mcarton/thefuck,lawrencebenson/thefuck,thinkerchan/thefuck,Clpsplug/thefuck,vanita5/thefuck,levythu/thefuck,BertieJim/thefuck,hxddh/thefuck,Clpsplug/thefuck,zhangzhishan/thefuck,barneyElDinosaurio/thefuck,lawrencebenson/thefuck,beni55/thefuck,subajat1/thefuck,mlk/thefuck,redreamality/thefuck,nvbn/thefuck,mlk/thefuck,roth1002/thefuck,PLNech/thefuck,beni55/thefuck,hxddh/thefuck,vanita5/thefuck,SimenB/thefuck,bigplus/thefuck,SimenB/thefuck,thesoulkiller/thefuck,mcarton/thefuck,bugaevc/thefuck,mbbill/thefuck,BertieJim/thefuck,AntonChankin/thefuck,sekaiamber/thefuck,nvbn/thefuck,MJerty/thefuck,thesoulkiller/thefuck,redreamality/thefuck,roth1002/thefuck,LawrenceHan/thefuck,manashmndl/thefuck,MJerty/thefuck,princeofdarkness76/thefuck,scorphus/thefuck,ostree/thefuck,artiya4u/thefuck,barneyElDinosaurio/thefuck,qingying5810/thefuck,scorphus/thefuck,thinkerchan/thefuck,ostree/thefuck,AntonChankin/thefuck,LawrenceHan/thefuck,NguyenHoaiNam/thefuck,levythu/thefuck
|
Add tests for the `dirty_unzip` rule
|
import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def zip_error(tmpdir):
path = os.path.join(str(tmpdir), 'foo.zip')
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
        # Reopen for reading: the write-mode handle is closed once the with-block exits.
        with zipfile.ZipFile(path) as archive:
            archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {'foo.zip', 'a', 'b', 'c'})
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_match(zip_error, script):
assert match(Command(script=script), None)
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_side_effect(zip_error, script):
side_effect(Command(script=script), None)
assert(os.listdir('.') == ['foo.zip'])
@pytest.mark.parametrize('script,fixed', [
('unzip foo', 'unzip foo -d foo'),
('unzip foo.zip', 'unzip foo.zip -d foo')])
def test_get_new_command(zip_error, script, fixed):
assert get_new_command(Command(script=script), None) == fixed
|
<commit_before><commit_msg>Add tests for the `dirty_unzip` rule<commit_after>
|
import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def zip_error(tmpdir):
path = os.path.join(str(tmpdir), 'foo.zip')
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
        # Reopen for reading: the write-mode handle is closed once the with-block exits.
        with zipfile.ZipFile(path) as archive:
            archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {'foo.zip', 'a', 'b', 'c'})
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_match(zip_error, script):
assert match(Command(script=script), None)
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_side_effect(zip_error, script):
side_effect(Command(script=script), None)
assert(os.listdir('.') == ['foo.zip'])
@pytest.mark.parametrize('script,fixed', [
('unzip foo', 'unzip foo -d foo'),
('unzip foo.zip', 'unzip foo.zip -d foo')])
def test_get_new_command(zip_error, script, fixed):
assert get_new_command(Command(script=script), None) == fixed
|
Add tests for the `dirty_unzip` rule
import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def zip_error(tmpdir):
path = os.path.join(str(tmpdir), 'foo.zip')
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
        # Reopen for reading: the write-mode handle is closed once the with-block exits.
        with zipfile.ZipFile(path) as archive:
            archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {'foo.zip', 'a', 'b', 'c'})
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_match(zip_error, script):
assert match(Command(script=script), None)
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_side_effect(zip_error, script):
side_effect(Command(script=script), None)
assert(os.listdir('.') == ['foo.zip'])
@pytest.mark.parametrize('script,fixed', [
('unzip foo', 'unzip foo -d foo'),
('unzip foo.zip', 'unzip foo.zip -d foo')])
def test_get_new_command(zip_error, script, fixed):
assert get_new_command(Command(script=script), None) == fixed
|
<commit_before><commit_msg>Add tests for the `dirty_unzip` rule<commit_after>import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
@pytest.fixture
def zip_error(tmpdir):
path = os.path.join(str(tmpdir), 'foo.zip')
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
        # Reopen for reading: the write-mode handle is closed once the with-block exits.
        with zipfile.ZipFile(path) as archive:
            archive.extractall()
os.chdir(str(tmpdir))
reset(path)
assert(set(os.listdir('.')) == {'foo.zip', 'a', 'b', 'c'})
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_match(zip_error, script):
assert match(Command(script=script), None)
@pytest.mark.parametrize('script', [
'unzip foo',
'unzip foo.zip'])
def test_side_effect(zip_error, script):
side_effect(Command(script=script), None)
assert(os.listdir('.') == ['foo.zip'])
@pytest.mark.parametrize('script,fixed', [
('unzip foo', 'unzip foo -d foo'),
('unzip foo.zip', 'unzip foo.zip -d foo')])
def test_get_new_command(zip_error, script, fixed):
assert get_new_command(Command(script=script), None) == fixed
|
|
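One caveat in the fixture above: the bare os.chdir leaks the working-directory change into whatever test runs next. pytest's monkeypatch.chdir restores the previous directory automatically at teardown; a sketch of the safer variant:

import pytest

@pytest.fixture
def in_tmpdir(tmpdir, monkeypatch):
    monkeypatch.chdir(str(tmpdir))  # undone automatically after the test
    return tmpdir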
0749111442c638569b6e42a11adee70e71e50813
|
test/lldbpexpect.py
|
test/lldbpexpect.py
|
import lldb
from lldbtest import *
import lldbutil
import os
import unittest2
import sys
import pexpect
class PExpectTest(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def doTest(self):
# put your commands here
pass
def launchArgs(self):
return ""
def launch(self):
self.timeout = 5
        self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.launchArgs()))
def expect(self, patterns=None, timeout=None):
if patterns is None: patterns = '.*'
return self.child.expect(patterns, timeout=timeout)
def sendimpl(self, sender, command, patterns=None, timeout=None):
if timeout is None: timeout = self.timeout
sender(command)
if patterns is not None: return self.expect(patterns=patterns, timeout=timeout)
return None
def send(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.send, command, patterns, timeout)
def sendline(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.sendline, command, patterns, timeout)
def quit(self, gracefully=None):
if gracefully is None: gracefully = True
self.child.sendeof()
self.child.close(force=not gracefully)
self.child = None
|
Add a helper class to write pexpect-based test cases. Over time, we should improve this class and port all pexpect-based testing over to using this
|
Add a helper class to write pexpect-based test cases
Over time, we should improve this class and port all pexpect-based testing over to using this
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@227875 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
Add a helper class to write pexpect-based test cases
Over time, we should improve this class and port all pexpect-based testing over to using this
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@227875 91177308-0d34-0410-b5e6-96231b3b80d8
|
import lldb
from lldbtest import *
import lldbutil
import os
import unittest2
import sys
import pexpect
class PExpectTest(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def doTest(self):
# put your commands here
pass
def launchArgs(self):
return ""
def launch(self):
self.timeout = 5
        self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.launchArgs()))
def expect(self, patterns=None, timeout=None):
if patterns is None: patterns = '.*'
return self.child.expect(patterns, timeout=timeout)
def sendimpl(self, sender, command, patterns=None, timeout=None):
if timeout is None: timeout = self.timeout
sender(command)
if patterns is not None: return self.expect(patterns=patterns, timeout=timeout)
return None
def send(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.send, command, patterns, timeout)
def sendline(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.sendline, command, patterns, timeout)
def quit(self, gracefully=None):
if gracefully is None: gracefully = True
self.child.sendeof()
self.child.close(force=not gracefully)
self.child = None
|
<commit_before><commit_msg>Add a helper class to write pexpect-based test cases
Over time, we should improve this class and port all pexpect-based testing over to using this
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@227875 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
import lldb
from lldbtest import *
import lldbutil
import os
import unittest2
import sys
import pexpect
class PExpectTest(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def doTest(self):
# put your commands here
pass
def launchArgs(self):
return ""
def launch(self):
self.timeout = 5
        self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.launchArgs()))
def expect(self, patterns=None, timeout=None):
if patterns is None: patterns = '.*'
return self.child.expect(patterns, timeout=timeout)
def sendimpl(self, sender, command, patterns=None, timeout=None):
if timeout is None: timeout = self.timeout
sender(command)
if patterns is not None: return self.expect(patterns=patterns, timeout=timeout)
return None
def send(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.send, command, patterns, timeout)
def sendline(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.sendline, command, patterns, timeout)
def quit(self, gracefully=None):
if gracefully is None: gracefully = True
self.child.sendeof()
self.child.close(force=not gracefully)
self.child = None
|
Add a helper class to write pexpect-based test cases
Over time, we should improve this class and port all pexpect-based testing over to using this
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@227875 91177308-0d34-0410-b5e6-96231b3b80d8
import lldb
from lldbtest import *
import lldbutil
import os
import unittest2
import sys
import pexpect
class PExpectTest(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def doTest(self):
# put your commands here
pass
def launchArgs(self):
return ""
def launch(self):
self.timeout = 5
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.launchArgs()))
def expect(self, patterns=None, timeout=None):
if patterns is None: patterns = '.*'
return self.child.expect(patterns, timeout=timeout)
def sendimpl(self, sender, command, patterns=None, timeout=None):
if timeout is None: timeout = self.timeout
sender(command)
if patterns is not None: return self.expect(patterns=patterns, timeout=timeout)
return None
def send(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.send, command, patterns, timeout)
def sendline(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.sendline, command, patterns, timeout)
def quit(self, gracefully=None):
if gracefully is None: gracefully = True
self.child.sendeof()
self.child.close(force=not gracefully)
self.child = None
|
<commit_before><commit_msg>Add a helper class to write pexpect-based test cases
Over time, we should improve this class and port all pexpect-based testing over to using this
git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@227875 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>import lldb
from lldbtest import *
import lldbutil
import os
import unittest2
import sys
import pexpect
class PExpectTest(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def doTest(self):
# put your commands here
pass
def launchArgs(self):
return ""
def launch(self):
self.timeout = 5
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.launchArgs()))
def expect(self, patterns=None, timeout=None):
if patterns is None: patterns = '.*'
return self.child.expect(patterns, timeout=timeout)
def sendimpl(self, sender, command, patterns=None, timeout=None):
if timeout is None: timeout = self.timeout
sender(command)
if patterns is not None: return self.expect(patterns=patterns, timeout=timeout)
return None
def send(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.send, command, patterns, timeout)
def sendline(self, command, patterns=None, timeout=None):
self.sendimpl(self.child.sendline, command, patterns, timeout)
def quit(self, gracefully=None):
if gracefully is None: gracefully = True
self.child.sendeof()
self.child.close(force=not gracefully)
self.child = None
|
|
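The helper above wraps pexpect's spawn/expect/send cycle for lldb's test harness, so it only runs inside that tree. A minimal self-contained sketch of the same drive-and-expect pattern against a plain interactive Python process — illustrative only; nothing below comes from the record itself:

import pexpect

# Spawn an interactive interpreter and drive it the way the helper's
# launch()/sendline()/expect()/quit() methods do.
child = pexpect.spawn('python3 -i', timeout=5)
child.expect('>>> ')      # wait for the prompt, like expect(patterns=...)
child.sendline('1 + 1')   # like sendline(command, ...)
child.expect('2')         # match the echoed result
child.sendeof()           # same shutdown sequence as quit(gracefully=True)
child.close(force=False)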
305c3e0ce2705dd23e00ec801f5588ec1dbcc3a8
|
py/two-sum-ii-input-array-is-sorted.py
|
py/two-sum-ii-input-array-is-sorted.py
|
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
head, tail = 0, len(numbers) - 1
while head < tail:
s = numbers[head] + numbers[tail]
if s == target:
return [head + 1, tail + 1]
elif s < target:
head += 1
elif s > target:
tail -= 1
|
Add py solution for 167. Two Sum II - Input array is sorted
|
Add py solution for 167. Two Sum II - Input array is sorted
167. Two Sum II - Input array is sorted: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 167. Two Sum II - Input array is sorted
167. Two Sum II - Input array is sorted: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/
|
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
head, tail = 0, len(numbers) - 1
while head < tail:
s = numbers[head] + numbers[tail]
if s == target:
return [head + 1, tail + 1]
elif s < target:
head += 1
elif s > target:
tail -= 1
|
<commit_before><commit_msg>Add py solution for 167. Two Sum II - Input array is sorted
167. Two Sum II - Input array is sorted: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/<commit_after>
|
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
head, tail = 0, len(numbers) - 1
while head < tail:
s = numbers[head] + numbers[tail]
if s == target:
return [head + 1, tail + 1]
elif s < target:
head += 1
elif s > target:
tail -= 1
|
Add py solution for 167. Two Sum II - Input array is sorted
167. Two Sum II - Input array is sorted: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
head, tail = 0, len(numbers) - 1
while head < tail:
s = numbers[head] + numbers[tail]
if s == target:
return [head + 1, tail + 1]
elif s < target:
head += 1
elif s > target:
tail -= 1
|
<commit_before><commit_msg>Add py solution for 167. Two Sum II - Input array is sorted
167. Two Sum II - Input array is sorted: https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/<commit_after>class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
head, tail = 0, len(numbers) - 1
while head < tail:
s = numbers[head] + numbers[tail]
if s == target:
return [head + 1, tail + 1]
elif s < target:
head += 1
elif s > target:
tail -= 1
|
|
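A quick sanity check of the two-pointer solution above, assuming the Solution class exactly as defined in the record; the inputs are the classic example plus a duplicate-value case:

sol = Solution()
assert sol.twoSum([2, 7, 11, 15], 9) == [1, 2]  # numbers[0] + numbers[1] == 9
assert sol.twoSum([1, 3, 4, 4], 8) == [3, 4]    # duplicates handled by the two pointers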
f8e24bf955eb70535b989aad6ab8666ddd013da1
|
tests/test_basic.py
|
tests/test_basic.py
|
import pytest
import subprocess
import os
import sys
prefix = '.'
for i in range(0,3):
if os.path.exists(os.path.join(prefix, 'pyiso.py')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pyiso
def test_nofiles(tmpdir):
# First set things up, and generate the ISO with genisoimage
outfile = tmpdir.join("no-file-test.iso")
indir = tmpdir.mkdir("nofile")
subprocess.call(["genisoimage", "-v", "-v", "-iso-level", "1", "-no-pad",
"-o", str(outfile), str(indir)])
iso = pyiso.PyIso()
iso.open(open(str(outfile), 'rb'))
# With no files, the ISO should be exactly 24 extents long
assert(iso.pvd.space_size == 24)
assert(iso.pvd.log_block_size == 2048)
assert(iso.pvd.path_tbl_size == 10)
|
Add in first py.test tests.
|
Add in first py.test tests.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com>
|
Python
|
lgpl-2.1
|
clalancette/pycdlib,clalancette/pyiso
|
Add in first py.test tests.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com>
|
import pytest
import subprocess
import os
import sys
prefix = '.'
for i in range(0,3):
if os.path.exists(os.path.join(prefix, 'pyiso.py')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pyiso
def test_nofiles(tmpdir):
# First set things up, and generate the ISO with genisoimage
outfile = tmpdir.join("no-file-test.iso")
indir = tmpdir.mkdir("nofile")
subprocess.call(["genisoimage", "-v", "-v", "-iso-level", "1", "-no-pad",
"-o", str(outfile), str(indir)])
iso = pyiso.PyIso()
iso.open(open(str(outfile), 'rb'))
# With no files, the ISO should be exactly 24 extents long
assert(iso.pvd.space_size == 24)
assert(iso.pvd.log_block_size == 2048)
assert(iso.pvd.path_tbl_size == 10)
|
<commit_before><commit_msg>Add in first py.test tests.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com><commit_after>
|
import pytest
import subprocess
import os
import sys
prefix = '.'
for i in range(0,3):
if os.path.exists(os.path.join(prefix, 'pyiso.py')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pyiso
def test_nofiles(tmpdir):
# First set things up, and generate the ISO with genisoimage
outfile = tmpdir.join("no-file-test.iso")
indir = tmpdir.mkdir("nofile")
subprocess.call(["genisoimage", "-v", "-v", "-iso-level", "1", "-no-pad",
"-o", str(outfile), str(indir)])
iso = pyiso.PyIso()
iso.open(open(str(outfile), 'rb'))
# With no files, the ISO should be exactly 24 extents long
assert(iso.pvd.space_size == 24)
assert(iso.pvd.log_block_size == 2048)
assert(iso.pvd.path_tbl_size == 10)
|
Add in first py.test tests.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com>
import pytest
import subprocess
import os
import sys
prefix = '.'
for i in range(0,3):
if os.path.exists(os.path.join(prefix, 'pyiso.py')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pyiso
def test_nofiles(tmpdir):
# First set things up, and generate the ISO with genisoimage
outfile = tmpdir.join("no-file-test.iso")
indir = tmpdir.mkdir("nofile")
subprocess.call(["genisoimage", "-v", "-v", "-iso-level", "1", "-no-pad",
"-o", str(outfile), str(indir)])
iso = pyiso.PyIso()
iso.open(open(str(outfile), 'rb'))
# With no files, the ISO should be exactly 24 extents long
assert(iso.pvd.space_size == 24)
assert(iso.pvd.log_block_size == 2048)
assert(iso.pvd.path_tbl_size == 10)
|
<commit_before><commit_msg>Add in first py.test tests.
Signed-off-by: Chris Lalancette <281cd07d7578d97c83271fbbf2faddb83ab3791c@gmail.com><commit_after>import pytest
import subprocess
import os
import sys
prefix = '.'
for i in range(0,3):
if os.path.exists(os.path.join(prefix, 'pyiso.py')):
sys.path.insert(0, prefix)
break
else:
prefix = '../' + prefix
import pyiso
def test_nofiles(tmpdir):
# First set things up, and generate the ISO with genisoimage
outfile = tmpdir.join("no-file-test.iso")
indir = tmpdir.mkdir("nofile")
subprocess.call(["genisoimage", "-v", "-v", "-iso-level", "1", "-no-pad",
"-o", str(outfile), str(indir)])
iso = pyiso.PyIso()
iso.open(open(str(outfile), 'rb'))
# With no files, the ISO should be exactly 24 extents long
assert(iso.pvd.space_size == 24)
assert(iso.pvd.log_block_size == 2048)
assert(iso.pvd.path_tbl_size == 10)
|
|
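The prefix loop at the top of the record walks up to three parent directories looking for pyiso.py before importing it. The same idea written with os.path joins and anchored to the test file rather than the working directory — a sketch of the idiom, not the record's code:

import os
import sys

here = os.path.dirname(os.path.abspath(__file__))
for depth in range(4):  # '.', '..', '../..', '../../..'
    candidate = os.path.abspath(os.path.join(here, *(['..'] * depth)))
    if os.path.exists(os.path.join(candidate, 'pyiso.py')):
        sys.path.insert(0, candidate)
        break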
9c6f0cf829f4f0c7ff71ad65bed36269425dae13
|
social_core/tests/backends/test_zoom.py
|
social_core/tests/backends/test_zoom.py
|
import json
from .oauth import OAuth2Test
class ZoomOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.zoom.ZoomOAuth2'
user_data_url = 'https://api.zoom.us/v2/users/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar-token',
'token_type': 'bearer',
'refresh_token': 'foobar-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
user_data_body = json.dumps({
'id': 'foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': 'foobar@email.com',
'type': 2,
'role_name': 'Foobar',
'pmi': 1234567890,
'use_pmi': False,
'vanity_url': 'https://foobar.zoom.us/my/foobar',
'personal_meeting_url': 'https://foobar.zoom.us/j/1234567890',
'timezone': 'America/Denver',
'verified': 1,
'dept': '',
'created_at': '2019-04-05T15:24:32Z',
'last_login_time': '2019-12-16T18:02:48Z',
'last_client_version': 'version',
'pic_url': 'https://foobar.zoom.us/p/123456789',
'host_key': '123456',
'jid': 'foobar@xmpp.zoom.us',
'group_ids': [],
'im_group_ids': [
'foobar-group-id'
],
'account_id': 'foobar-account-id',
'language': 'en-US',
'phone_country': 'US',
'phone_number': '+1 1234567891',
'status': 'active'
})
refresh_token_body = json.dumps({
'access_token': 'foobar-new-token',
'token_type': 'bearer',
'refresh_token': 'foobar-new-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
|
Add test for zoom backend
|
Add test for zoom backend
|
Python
|
bsd-3-clause
|
python-social-auth/social-core,python-social-auth/social-core
|
Add test for zoom backend
|
import json
from .oauth import OAuth2Test
class ZoomOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.zoom.ZoomOAuth2'
user_data_url = 'https://api.zoom.us/v2/users/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar-token',
'token_type': 'bearer',
'refresh_token': 'foobar-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
user_data_body = json.dumps({
'id': 'foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': 'foobar@email.com',
'type': 2,
'role_name': 'Foobar',
'pmi': 1234567890,
'use_pmi': False,
'vanity_url': 'https://foobar.zoom.us/my/foobar',
'personal_meeting_url': 'https://foobar.zoom.us/j/1234567890',
'timezone': 'America/Denver',
'verified': 1,
'dept': '',
'created_at': '2019-04-05T15:24:32Z',
'last_login_time': '2019-12-16T18:02:48Z',
'last_client_version': 'version',
'pic_url': 'https://foobar.zoom.us/p/123456789',
'host_key': '123456',
'jid': 'foobar@xmpp.zoom.us',
'group_ids': [],
'im_group_ids': [
'foobar-group-id'
],
'account_id': 'foobar-account-id',
'language': 'en-US',
'phone_country': 'US',
'phone_number': '+1 1234567891',
'status': 'active'
})
refresh_token_body = json.dumps({
'access_token': 'foobar-new-token',
'token_type': 'bearer',
'refresh_token': 'foobar-new-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
|
<commit_before><commit_msg>Add test for zoom backend<commit_after>
|
import json
from .oauth import OAuth2Test
class ZoomOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.zoom.ZoomOAuth2'
user_data_url = 'https://api.zoom.us/v2/users/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar-token',
'token_type': 'bearer',
'refresh_token': 'foobar-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
user_data_body = json.dumps({
'id': 'foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': 'foobar@email.com',
'type': 2,
'role_name': 'Foobar',
'pmi': 1234567890,
'use_pmi': False,
'vanity_url': 'https://foobar.zoom.us/my/foobar',
'personal_meeting_url': 'https://foobar.zoom.us/j/1234567890',
'timezone': 'America/Denver',
'verified': 1,
'dept': '',
'created_at': '2019-04-05T15:24:32Z',
'last_login_time': '2019-12-16T18:02:48Z',
'last_client_version': 'version',
'pic_url': 'https://foobar.zoom.us/p/123456789',
'host_key': '123456',
'jid': 'foobar@xmpp.zoom.us',
'group_ids': [],
'im_group_ids': [
'foobar-group-id'
],
'account_id': 'foobar-account-id',
'language': 'en-US',
'phone_country': 'US',
'phone_number': '+1 1234567891',
'status': 'active'
})
refresh_token_body = json.dumps({
'access_token': 'foobar-new-token',
'token_type': 'bearer',
'refresh_token': 'foobar-new-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
|
Add test for zoom backend
import json
from .oauth import OAuth2Test
class ZoomOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.zoom.ZoomOAuth2'
user_data_url = 'https://api.zoom.us/v2/users/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar-token',
'token_type': 'bearer',
'refresh_token': 'foobar-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
user_data_body = json.dumps({
'id': 'foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': 'foobar@email.com',
'type': 2,
'role_name': 'Foobar',
'pmi': 1234567890,
'use_pmi': False,
'vanity_url': 'https://foobar.zoom.us/my/foobar',
'personal_meeting_url': 'https://foobar.zoom.us/j/1234567890',
'timezone': 'America/Denver',
'verified': 1,
'dept': '',
'created_at': '2019-04-05T15:24:32Z',
'last_login_time': '2019-12-16T18:02:48Z',
'last_client_version': 'version',
'pic_url': 'https://foobar.zoom.us/p/123456789',
'host_key': '123456',
'jid': 'foobar@xmpp.zoom.us',
'group_ids': [],
'im_group_ids': [
'foobar-group-id'
],
'account_id': 'foobar-account-id',
'language': 'en-US',
'phone_country': 'US',
'phone_number': '+1 1234567891',
'status': 'active'
})
refresh_token_body = json.dumps({
'access_token': 'foobar-new-token',
'token_type': 'bearer',
'refresh_token': 'foobar-new-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
|
<commit_before><commit_msg>Add test for zoom backend<commit_after>import json
from .oauth import OAuth2Test
class ZoomOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.zoom.ZoomOAuth2'
user_data_url = 'https://api.zoom.us/v2/users/me'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar-token',
'token_type': 'bearer',
'refresh_token': 'foobar-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
user_data_body = json.dumps({
'id': 'foobar',
'first_name': 'Foo',
'last_name': 'Bar',
'email': 'foobar@email.com',
'type': 2,
'role_name': 'Foobar',
'pmi': 1234567890,
'use_pmi': False,
'vanity_url': 'https://foobar.zoom.us/my/foobar',
'personal_meeting_url': 'https://foobar.zoom.us/j/1234567890',
'timezone': 'America/Denver',
'verified': 1,
'dept': '',
'created_at': '2019-04-05T15:24:32Z',
'last_login_time': '2019-12-16T18:02:48Z',
'last_client_version': 'version',
'pic_url': 'https://foobar.zoom.us/p/123456789',
'host_key': '123456',
'jid': 'foobar@xmpp.zoom.us',
'group_ids': [],
'im_group_ids': [
'foobar-group-id'
],
'account_id': 'foobar-account-id',
'language': 'en-US',
'phone_country': 'US',
'phone_number': '+1 1234567891',
'status': 'active'
})
refresh_token_body = json.dumps({
'access_token': 'foobar-new-token',
'token_type': 'bearer',
'refresh_token': 'foobar-new-refresh-token',
'expires_in': 3599,
'scope': 'identity'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
|
|
6824c741c455339eaaff5481f6e84c42fe1e26cf
|
susanplay/mainSusan.py
|
susanplay/mainSusan.py
|
"""
This is a template top level script.
Please don't edit this file. Instead, copy it to
youname_main.py, then run and edit that file.
"""
import dave.pipeline.pipeline as dpp
import dave.pipeline.clipboard as clipboard
def main():
"""A bare bones main program"""
cfg = loadMyConfiguration()
epicList = [206103150]
for epic in epicList:
runOne(epic, cfg)
def loadMyConfiguration():
"""Load the default pipeline configuration and adjust as necessary
"""
cfg = dpp.loadDefaultConfig()
#Edit the default configuration to your taste.
#Change anything else you don't like about the default config here.
cfg['debug'] = True
tasks = """dpp.checkDirExistTask dpp.serveTask dpp.extractLightcurveTask
dpp.computeCentroidsTask dpp.rollPhaseTask dpp.cotrendDataTask
dpp.detrendDataTask dpp.blsTask dpp.trapezoidFitTask dpp.lppMetricTask
dpp.modshiftTask dpp.measureDiffImgCentroidsTask dpp.dispositionTask
dpp.plotTask dpp.saveOnError""".split()
cfg['taskList'] = tasks
return cfg
def runOne(k2id, config):
"""Run the pipeline on a single target.
Inputs:
------------
k2id
(int) Epic id of target to run on.
config
(dict) Dictionary of configuration parameters as created by, e.g
loadMyConfiguration()
Returns:
---------
A clipboard containing the results.
Notes:
---------
Don't edit this function. The pipeline can recover gracefully from
errors in any individual task, but an error in this function will crash
the pipeline
"""
taskList = config['taskList']
clip = clipboard.Clipboard()
clip['config'] = config
clip['value'] = k2id
#Check that all the tasks are properly defined
for t in taskList:
f = eval(t)
#Now run them.
for t in taskList:
f = eval(t)
clip = f(clip)
return clip
|
Copy of main.py from fergal, reworked
|
Copy of main.py from fergal, reworked
|
Python
|
mit
|
barentsen/dave,barentsen/dave,barentsen/dave,barentsen/dave
|
Copy of main.py from fergal, reworked
|
"""
This is a template top level script.
Please don't edit this file. Instead, copy it to
youname_main.py, then run and edit that file.
"""
import dave.pipeline.pipeline as dpp
import dave.pipeline.clipboard as clipboard
def main():
"""A bare bones main program"""
cfg = loadMyConfiguration()
epicList = [206103150]
for epic in epicList:
runOne(epic, cfg)
def loadMyConfiguration():
"""Load the default pipeline configuration and adjust as necessary
"""
cfg = dpp.loadDefaultConfig()
#Edit the default configuration to your taste.
#Change anything else you don't like about the default config here.
cfg['debug'] = True
tasks = """dpp.checkDirExistTask dpp.serveTask dpp.extractLightcurveTask
dpp.computeCentroidsTask dpp.rollPhaseTask dpp.cotrendDataTask
dpp.detrendDataTask dpp.blsTask dpp.trapezoidFitTask dpp.lppMetricTask
dpp.modshiftTask dpp.measureDiffImgCentroidsTask dpp.dispositionTask
dpp.plotTask dpp.saveOnError""".split()
cfg['taskList'] = tasks
return cfg
def runOne(k2id, config):
"""Run the pipeline on a single target.
Inputs:
------------
k2id
(int) Epic id of target to run on.
config
(dict) Dictionary of configuration parameters as created by, e.g
loadMyConfiguration()
Returns:
---------
A clipboard containing the results.
Notes:
---------
Don't edit this function. The pipeline can recover gracefully from
errors in any individual task, but an error in this function will crash
the pipeline
"""
taskList = config['taskList']
clip = clipboard.Clipboard()
clip['config'] = config
clip['value'] = k2id
#Check that all the tasks are properly defined
for t in taskList:
f = eval(t)
#Now run them.
for t in taskList:
f = eval(t)
clip = f(clip)
return clip
|
<commit_before><commit_msg>Copy of main.py from fergal, reworked<commit_after>
|
"""
This is a template top level script.
Please don't edit this file. Instead, copy it to
youname_main.py, then run and edit that file.
"""
import dave.pipeline.pipeline as dpp
import dave.pipeline.clipboard as clipboard
def main():
"""A bare bones main program"""
cfg = loadMyConfiguration()
epicList = [206103150]
for epic in epicList:
runOne(epic, cfg)
def loadMyConfiguration():
"""Load the default pipeline configuration and adjust as necessary
"""
cfg = dpp.loadDefaultConfig()
#Edit the default configuration to your taste.
#Change anything else you don't like about the default config here.
cfg['debug'] = True
tasks = """dpp.checkDirExistTask dpp.serveTask dpp.extractLightcurveTask
dpp.computeCentroidsTask dpp.rollPhaseTask dpp.cotrendDataTask
dpp.detrendDataTask dpp.blsTask dpp.trapezoidFitTask dpp.lppMetricTask
dpp.modshiftTask dpp.measureDiffImgCentroidsTask dpp.dispositionTask
dpp.plotTask dpp.saveOnError""".split()
cfg['taskList'] = tasks
return cfg
def runOne(k2id, config):
"""Run the pipeline on a single target.
Inputs:
------------
k2id
(int) Epic id of target to run on.
config
(dict) Dictionary of configuration parameters as created by, e.g
loadMyConfiguration()
Returns:
---------
A clipboard containing the results.
Notes:
---------
Don't edit this function. The pipeline can recover gracefully from
errors in any individual task, but an error in this function will crash
the pipeline
"""
taskList = config['taskList']
clip = clipboard.Clipboard()
clip['config'] = config
clip['value'] = k2id
#Check that all the tasks are properly defined
for t in taskList:
f = eval(t)
#Now run them.
for t in taskList:
f = eval(t)
clip = f(clip)
return clip
|
Copy of main.py from fergal, reworked
"""
This is a template top level script.
Please don't edit this file. Instead, copy it to
youname_main.py, then run and edit that file.
"""
import dave.pipeline.pipeline as dpp
import dave.pipeline.clipboard as clipboard
def main():
"""A bare bones main program"""
cfg = loadMyConfiguration()
epicList = [206103150]
for epic in epicList:
runOne(epic, cfg)
def loadMyConfiguration():
"""Load the default pipeline configuration and adjust as necessary
"""
cfg = dpp.loadDefaultConfig()
#Edit the default configuration to your taste.
#Change anything else you don't like about the default config here.
cfg['debug'] = True
tasks = """dpp.checkDirExistTask dpp.serveTask dpp.extractLightcurveTask
dpp.computeCentroidsTask dpp.rollPhaseTask dpp.cotrendDataTask
dpp.detrendDataTask dpp.blsTask dpp.trapezoidFitTask dpp.lppMetricTask
dpp.modshiftTask dpp.measureDiffImgCentroidsTask dpp.dispositionTask
dpp.plotTask dpp.saveOnError""".split()
cfg['taskList'] = tasks
return cfg
def runOne(k2id, config):
"""Run the pipeline on a single target.
Inputs:
------------
k2id
(int) Epic id of target to run on.
config
(dict) Dictionary of configuration parameters as created by, e.g
loadMyConfiguration()
Returns:
---------
A clipboard containing the results.
Notes:
---------
Don't edit this function. The pipeline can recover gracefully from
errors in any individual task, but an error in this function will crash
the pipeline
"""
taskList = config['taskList']
clip = clipboard.Clipboard()
clip['config'] = config
clip['value'] = k2id
#Check that all the tasks are properly defined
for t in taskList:
f = eval(t)
#Now run them.
for t in taskList:
f = eval(t)
clip = f(clip)
return clip
|
<commit_before><commit_msg>Copy of main.py from fergal, reworked<commit_after>
"""
This is a template top level script.
Please don't edit this file. Instead, copy it to
youname_main.py, then run and edit that file.
"""
import dave.pipeline.pipeline as dpp
import dave.pipeline.clipboard as clipboard
def main():
"""A bare bones main program"""
cfg = loadMyConfiguration()
epicList = [206103150]
for epic in epicList:
runOne(epic, cfg)
def loadMyConfiguration():
"""Load the default pipeline configuration and adjust as necessary
"""
cfg = dpp.loadDefaultConfig()
#Edit the default configuration to your taste.
#Change anything else you don't like about the default config here.
cfg['debug'] = True
tasks = """dpp.checkDirExistTask dpp.serveTask dpp.extractLightcurveTask
dpp.computeCentroidsTask dpp.rollPhaseTask dpp.cotrendDataTask
dpp.detrendDataTask dpp.blsTask dpp.trapezoidFitTask dpp.lppMetricTask
dpp.modshiftTask dpp.measureDiffImgCentroidsTask dpp.dispositionTask
dpp.plotTask dpp.saveOnError""".split()
cfg['taskList'] = tasks
return cfg
def runOne(k2id, config):
"""Run the pipeline on a single target.
Inputs:
------------
k2id
(int) Epic id of target to run on.
config
(dict) Dictionary of configuration parameters as created by, e.g
loadMyConfiguration()
Returns:
---------
A clipboard containing the results.
Notes:
---------
Don't edit this function. The pipeline can recover gracefully from
errors in any individual task, but an error in this function will crash
the pipeline
"""
taskList = config['taskList']
clip = clipboard.Clipboard()
clip['config'] = config
clip['value'] = k2id
#Check that all the tasks are properly defined
for t in taskList:
f = eval(t)
#Now run them.
for t in taskList:
f = eval(t)
clip = f(clip)
return clip
|
|
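runOne() above resolves each task name with eval(), which will execute any expression placed in the config. A stricter dispatch via attribute lookup on the imported module alias is a common alternative — the sketch below shows that alternative, not what the record does, and it only runs where the dave package from the record is importable:

import dave.pipeline.pipeline as dpp  # same import as the record

def resolve_task(name):
    # "dpp.blsTask" -> getattr(dpp, "blsTask"); typos raise AttributeError
    alias, func_name = name.split('.', 1)
    if alias != 'dpp':
        raise ValueError('unexpected task prefix: %s' % alias)
    return getattr(dpp, func_name)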
a4a956899008102b993d2268fbf6ae92d191ee6a
|
ifttt/ifttt-tests.py
|
ifttt/ifttt-tests.py
|
# -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 Ori Livneh <ori@wikimedia.org>
Stephen LaPorte <stephen.laporte@gmail.com>
Alangi Derick <alangiderick@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from triggers import *
def test_aotd_trigger():
"""Test suite for Article of the Day trigger"""
pass
|
Test suite for Wikipedia triggers
|
Test suite for Wikipedia triggers
* This marks the start of the test cases for
Wikipedia IFTTT triggers.
|
Python
|
apache-2.0
|
ch3nkula/ifttt,ch3nkula/ifttt,ch3nkula/ifttt
|
Test suite for Wikipedia triggers
* This marks the start of the test cases for
Wikipedia IFTTT triggers.
|
# -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 Ori Livneh <ori@wikimedia.org>
Stephen LaPorte <stephen.laporte@gmail.com>
Alangi Derick <alangiderick@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from triggers import *
def test_aotd_trigger():
"""Test suite for Article of the Day trigger"""
pass
|
<commit_before><commit_msg>Test suite for Wikipedia triggers
* This marks the start of the test cases for
Wikipedia IFTTT triggers.<commit_after>
|
# -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 Ori Livneh <ori@wikimedia.org>
Stephen LaPorte <stephen.laporte@gmail.com>
Alangi Derick <alangiderick@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from triggers import *
def test_aotd_trigger():
"""Test suite for Article of the Day trigger"""
pass
|
Test suite for Wikipedia triggers
* This marks the start of the test cases for
Wikipedia IFTTT triggers.
# -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 Ori Livneh <ori@wikimedia.org>
Stephen LaPorte <stephen.laporte@gmail.com>
Alangi Derick <alangiderick@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from triggers import *
def test_aotd_trigger():
"""Test suite for Article of the Day trigger"""
pass
|
<commit_before><commit_msg>Test suite for Wikipedia triggers
* This marks the start of the test cases for
Wikipedia IFTTT triggers.<commit_after># -*- coding: utf-8 -*-
"""
Wikipedia channel for IFTTT
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2015 Ori Livneh <ori@wikimedia.org>
Stephen LaPorte <stephen.laporte@gmail.com>
Alangi Derick <alangiderick@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from triggers import *
def test_aotd_trigger():
"""Test suite for Article of the Day trigger"""
pass
|
|
4a8c3043962efa7e2a443a10a0ad13d025699730
|
support/get_lsf_job_info.py
|
support/get_lsf_job_info.py
|
import os
import sys
from subprocess import Popen, PIPE
# --------------------------------------------------------------------------------------------------
def get_job_run_time(lsf_output_file, time='s'):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Run time', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
run_time = int(output.split(" ")[-2])
if time == 'm':
run_time = run_time/60
return run_time
# --------------------------------------------------------------------------------------------------
def get_job_max_used_memory(lsf_output_file):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Max Memory', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
max_memory = int(output.split(" ")[-2])
return max_memory
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
source_dir = sys.argv[1]
families = [x for x in os.listdir(source_dir) if os.path.isdir(os.path.join(source_dir, x))]
for family in families:
family_dir = (os.path.join(source_dir, family))
lsf_output_file = os.path.join(family_dir, "auto_rfsearch.out")
run_time = get_job_run_time(lsf_output_file, time='m')
memory = get_job_max_used_memory(lsf_output_file)
print "%s\t%s\t%s" % (family, run_time, memory)
|
Add script to extract useful lsf job information
|
Add script to extract useful lsf job information
|
Python
|
apache-2.0
|
Rfam/rfam-production,Rfam/rfam-production,Rfam/rfam-production
|
Add script to extract useful lsf job information
|
import os
import sys
from subprocess import Popen, PIPE
# --------------------------------------------------------------------------------------------------
def get_job_run_time(lsf_output_file, time='s'):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Run time', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
run_time = int(output.split(" ")[-2])
if time == 'm':
run_time = run_time/60
return run_time
# --------------------------------------------------------------------------------------------------
def get_job_max_used_memory(lsf_output_file):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Max Memory', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
max_memory = int(output.split(" ")[-2])
return max_memory
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
source_dir = sys.argv[1]
families = [x for x in os.listdir(source_dir) if os.path.isdir(os.path.join(source_dir, x))]
for family in families:
family_dir = (os.path.join(source_dir, family))
lsf_output_file = os.path.join(family_dir, "auto_rfsearch.out")
run_time = get_job_run_time(lsf_output_file, time='m')
memory = get_job_max_used_memory(lsf_output_file)
print "%s\t%s\t%s" % (family, run_time, memory)
|
<commit_before><commit_msg>Add script to extract useful lsf job information<commit_after>
|
import os
import sys
from subprocess import Popen, PIPE
# --------------------------------------------------------------------------------------------------
def get_job_run_time(lsf_output_file, time='s'):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Run time', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
run_time = int(output.split(" ")[-2])
if time == 'm':
run_time = run_time/60
return run_time
# --------------------------------------------------------------------------------------------------
def get_job_max_used_memory(lsf_output_file):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Max Memory', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
max_memory = int(output.split(" ")[-2])
return max_memory
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
source_dir = sys.argv[1]
families = [x for x in os.listdir(source_dir) if os.path.isdir(os.path.join(source_dir, x))]
for family in families:
family_dir = (os.path.join(source_dir, family))
lsf_output_file = os.path.join(family_dir, "auto_rfsearch.out")
run_time = get_job_run_time(lsf_output_file, time='m')
memory = get_job_max_used_memory(lsf_output_file)
print "%s\t%s\t%s" % (family, run_time, memory)
|
Add script to extract useful lsf job information
import os
import sys
from subprocess import Popen, PIPE
# --------------------------------------------------------------------------------------------------
def get_job_run_time(lsf_output_file, time='s'):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Run time', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
run_time = int(output.split(" ")[-2])
if time == 'm':
run_time = run_time/60
return run_time
# --------------------------------------------------------------------------------------------------
def get_job_max_used_memory(lsf_output_file):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Max Memory', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
max_memory = int(output.split(" ")[-2])
return max_memory
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
source_dir = sys.argv[1]
families = [x for x in os.listdir(source_dir) if os.path.isdir(os.path.join(source_dir, x))]
for family in families:
family_dir = (os.path.join(source_dir, family))
lsf_output_file = os.path.join(family_dir, "auto_rfsearch.out")
run_time = get_job_run_time(lsf_output_file, time='m')
memory = get_job_max_used_memory(lsf_output_file)
print "%s\t%s\t%s" % (family, run_time, memory)
|
<commit_before><commit_msg>Add script to extract useful lsf job information<commit_after>import os
import sys
from subprocess import Popen, PIPE
# --------------------------------------------------------------------------------------------------
def get_job_run_time(lsf_output_file, time='s'):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Run time', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
run_time = int(output.split(" ")[-2])
if time == 'm':
run_time = run_time/60
return run_time
# --------------------------------------------------------------------------------------------------
def get_job_max_used_memory(lsf_output_file):
"""
"""
fp = open(lsf_output_file, 'r')
process = Popen(['grep', 'Max Memory', lsf_output_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
max_memory = int(output.split(" ")[-2])
return max_memory
# --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
source_dir = sys.argv[1]
families = [x for x in os.listdir(source_dir) if os.path.isdir(os.path.join(source_dir, x))]
for family in families:
family_dir = (os.path.join(source_dir, family))
lsf_output_file = os.path.join(family_dir, "auto_rfsearch.out")
run_time = get_job_run_time(lsf_output_file, time='m')
memory = get_job_max_used_memory(lsf_output_file)
print "%s\t%s\t%s" % (family, run_time, memory)
|
|
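Both helpers above grep the LSF output and take the second-to-last whitespace token, which assumes accounting lines shaped like 'Run time : 123 sec.' and 'Max Memory : 456 MB' (note also that the opened fp handle is never actually used). A pure-Python sketch of the same extraction under that assumed line format:

def parse_lsf_field(path, label):
    # Return the integer before the unit on the first line containing label,
    # e.g. 'Max Memory :   456 MB' -> 456; None if the label never appears.
    with open(path) as fh:
        for line in fh:
            if label in line:
                return int(line.split()[-2])
    return None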
52d947daa8ea6642472660d0c16c2b05e34bea41
|
src/users/migrations/0010_cocrecord.py
|
src/users/migrations/0010_cocrecord.py
|
# Generated by Django 3.0.2 on 2020-02-23 11:12
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('coc_version', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^202[\\d].[\\d]+$', 'Not a valid CoC version')], verbose_name='latest agreed CoC version')),
],
),
]
|
Add migration file for the model of CoC record
|
Add migration file for the model of CoC record
|
Python
|
mit
|
pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016
|
Add migration file for the model of CoC record
|
# Generated by Django 3.0.2 on 2020-02-23 11:12
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('coc_version', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^202[\\d].[\\d]+$', 'Not a valid CoC version')], verbose_name='latest agreed CoC version')),
],
),
]
|
<commit_before><commit_msg>Add migration file for the model of CoC record<commit_after>
|
# Generated by Django 3.0.2 on 2020-02-23 11:12
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('coc_version', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^202[\\d].[\\d]+$', 'Not a valid CoC version')], verbose_name='latest agreed CoC version')),
],
),
]
|
Add migration file for the model of CoC record
# Generated by Django 3.0.2 on 2020-02-23 11:12
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('coc_version', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^202[\\d].[\\d]+$', 'Not a valid CoC version')], verbose_name='latest agreed CoC version')),
],
),
]
|
<commit_before><commit_msg>Add migration file for the model of CoC record<commit_after># Generated by Django 3.0.2 on 2020-02-23 11:12
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20160227_1656'),
]
operations = [
migrations.CreateModel(
name='CocRecord',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, verbose_name='user')),
('coc_version', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^202[\\d].[\\d]+$', 'Not a valid CoC version')], verbose_name='latest agreed CoC version')),
],
),
]
|
|
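The validator above leaves the dot unescaped in '^202[\d].[\d]+$', so any character is accepted between the year and the minor number. A small demonstration, plus a stricter variant offered only as a suggestion — the stricter pattern is not part of the migration:

import re

assert re.match(r'^202[\d].[\d]+$', '2020x1')      # unintended: '.' matches 'x'
assert re.match(r'^202[\d]\.[\d]+$', '2020.1')     # stricter: literal dot only
assert not re.match(r'^202[\d]\.[\d]+$', '2020x1')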
b7bc68872a45396358ce20a215e3a3a2c3734b8a
|
py3status/modules/pretend_ram.py
|
py3status/modules/pretend_ram.py
|
# -*- coding: utf-8 -*-
from __future__ import division
import random
class Py3status:
"""
"""
format = "{bar}"
thresholds = [(0, "good"), (40, "degraded"), (75, "bad")]
cache_timeout = 1
middle_char = '|'
middle_color = None
left_char = '|'
left_color = None
right_char = '|'
right_color = None
length = 10
def post_config_hook(self):
self.increasing = True
self.value = 0
def testBars(self):
delta = random.randint(1, 10)
if self.increasing:
self.value += delta
if self.value > 99:
self.value = 100
self.increasing = False
else:
self.value -= delta
if self.value < 1:
self.value = 0
self.increasing = True
composites = self.py3.progress_bar(self.value, length=self.length,
middle_char=self.middle_char, middle_color=self.middle_color,
left_char=self.left_char, left_color=self.left_color,
right_char=self.right_char, right_color=self.right_color
)
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'bar': composites}),
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
Add pretend ram module to try out the progress bar
|
Add pretend ram module to try out the progress bar
|
Python
|
bsd-3-clause
|
guiniol/py3status,guiniol/py3status
|
Add pretend ram module to try out the progress bar
|
# -*- coding: utf-8 -*-
from __future__ import division
import random
class Py3status:
"""
"""
format = "{bar}"
thresholds = [(0, "good"), (40, "degraded"), (75, "bad")]
cache_timeout = 1
middle_char = '|'
middle_color = None
left_char = '|'
left_color = None
right_char = '|'
right_color = None
length = 10
def post_config_hook(self):
self.increasing = True
self.value = 0
def testBars(self):
delta = random.randint(1, 10)
if self.increasing:
self.value += delta
if self.value > 99:
self.value = 100
self.increasing = False
else:
self.value -= delta
if self.value < 1:
self.value = 0
self.increasing = True
composites = self.py3.progress_bar(self.value, length=self.length,
middle_char=self.middle_char, middle_color=self.middle_color,
left_char=self.left_char, left_color=self.left_color,
right_char=self.right_char, right_color=self.right_color
)
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'bar': composites}),
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
<commit_before><commit_msg>Add pretend ram module to try out the progress bar<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import division
import random
class Py3status:
"""
"""
format = "{bar}"
thresholds = [(0, "good"), (40, "degraded"), (75, "bad")]
cache_timeout = 1
middle_char = '|'
middle_color = None
left_char = '|'
left_color = None
right_char = '|'
right_color = None
length = 10
def post_config_hook(self):
self.increasing = True
self.value = 0
def testBars(self):
delta = random.randint(1, 10)
if self.increasing:
self.value += delta
if self.value > 99:
self.value = 100
self.increasing = False
else:
self.value -= delta
if self.value < 1:
self.value = 0
self.increasing = True
composites = self.py3.progress_bar(self.value, length=self.length,
middle_char=self.middle_char, middle_color=self.middle_color,
left_char=self.left_char, left_color=self.left_color,
right_char=self.right_char, right_color=self.right_color
)
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'bar': composites}),
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
Add pretend ram module to try out the progress bar
# -*- coding: utf-8 -*-
from __future__ import division
import random
class Py3status:
"""
"""
format = "{bar}"
thresholds = [(0, "good"), (40, "degraded"), (75, "bad")]
cache_timeout = 1
middle_char = '|'
middle_color = None
left_char = '|'
left_color = None
right_char = '|'
right_color = None
length = 10
def post_config_hook(self):
self.increasing = True
self.value = 0
def testBars(self):
delta = random.randint(1, 10)
if self.increasing:
self.value += delta
if self.value > 99:
self.value = 100
self.increasing = False
else:
self.value -= delta
if self.value < 1:
self.value = 0
self.increasing = True
composites = self.py3.progress_bar(self.value, length=self.length,
middle_char=self.middle_char, middle_color=self.middle_color,
left_char=self.left_char, left_color=self.left_color,
right_char=self.right_char, right_color=self.right_color
)
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'bar': composites}),
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
<commit_before><commit_msg>Add pretend ram module to try out the progress bar<commit_after># -*- coding: utf-8 -*-
from __future__ import division
import random
class Py3status:
"""
"""
format = "{bar}"
thresholds = [(0, "good"), (40, "degraded"), (75, "bad")]
cache_timeout = 1
middle_char = '|'
middle_color = None
left_char = '|'
left_color = None
right_char = '|'
right_color = None
length = 10
def post_config_hook(self):
self.increasing = True
self.value = 0
def testBars(self):
delta = random.randint(1, 10)
if self.increasing:
self.value += delta
if self.value > 99:
self.value = 100
self.increasing = False
else:
self.value -= delta
if self.value < 1:
self.value = 0
self.increasing = True
composites = self.py3.progress_bar(self.value, length=self.length,
middle_char=self.middle_char, middle_color=self.middle_color,
left_char=self.left_char, left_color=self.left_color,
right_char=self.right_char, right_color=self.right_color
)
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'bar': composites}),
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
|
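py3.progress_bar is a py3status helper, so the module above renders only inside that framework. A standalone sketch of the kind of fill-level bar it produces, using the module's default '|' character — an illustration, not the framework's implementation:

def ascii_bar(value, length=10, char='|'):
    # value in [0, 100] -> e.g. ascii_bar(40) == '||||      '
    filled = int(round(value / 100.0 * length))
    return char * filled + ' ' * (length - filled)

for v in (0, 40, 75, 100):
    print('%3d%% [%s]' % (v, ascii_bar(v)))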
abc155280052ab2f216342acd7933db3e090d94e
|
test/test_export_flow.py
|
test/test_export_flow.py
|
import netlib.tutils
from libmproxy import flow_export
from . import tutils
req_get = netlib.tutils.treq(
method='GET',
headers=None,
content=None,
)
req_post = netlib.tutils.treq(
method='POST',
headers=None,
)
def test_request_simple():
flow = tutils.tflow(req=req_get)
assert flow_export.curl_command(flow)
flow = tutils.tflow(req=req_post)
assert flow_export.curl_command(flow)
|
Add some basic tests for flow_exports
|
Add some basic tests for flow_exports
|
Python
|
mit
|
mitmproxy/mitmproxy,StevenVanAcker/mitmproxy,mitmproxy/mitmproxy,xaxa89/mitmproxy,zlorb/mitmproxy,dwfreed/mitmproxy,dwfreed/mitmproxy,ddworken/mitmproxy,jvillacorta/mitmproxy,ddworken/mitmproxy,mhils/mitmproxy,mhils/mitmproxy,mitmproxy/mitmproxy,gzzhanghao/mitmproxy,laurmurclar/mitmproxy,ddworken/mitmproxy,mosajjal/mitmproxy,StevenVanAcker/mitmproxy,tdickers/mitmproxy,xaxa89/mitmproxy,dufferzafar/mitmproxy,mhils/mitmproxy,mosajjal/mitmproxy,tdickers/mitmproxy,laurmurclar/mitmproxy,ParthGanatra/mitmproxy,dufferzafar/mitmproxy,fimad/mitmproxy,mitmproxy/mitmproxy,ujjwal96/mitmproxy,zlorb/mitmproxy,jvillacorta/mitmproxy,Kriechi/mitmproxy,StevenVanAcker/mitmproxy,xaxa89/mitmproxy,ddworken/mitmproxy,cortesi/mitmproxy,ikoz/mitmproxy,dufferzafar/mitmproxy,cortesi/mitmproxy,fimad/mitmproxy,tdickers/mitmproxy,jvillacorta/mitmproxy,gzzhanghao/mitmproxy,ujjwal96/mitmproxy,mosajjal/mitmproxy,ParthGanatra/mitmproxy,zlorb/mitmproxy,ikoz/mitmproxy,fimad/mitmproxy,dwfreed/mitmproxy,MatthewShao/mitmproxy,mhils/mitmproxy,Kriechi/mitmproxy,ujjwal96/mitmproxy,ParthGanatra/mitmproxy,fimad/mitmproxy,gzzhanghao/mitmproxy,ParthGanatra/mitmproxy,laurmurclar/mitmproxy,mosajjal/mitmproxy,mitmproxy/mitmproxy,laurmurclar/mitmproxy,MatthewShao/mitmproxy,vhaupert/mitmproxy,MatthewShao/mitmproxy,xaxa89/mitmproxy,Kriechi/mitmproxy,vhaupert/mitmproxy,MatthewShao/mitmproxy,ujjwal96/mitmproxy,zlorb/mitmproxy,tdickers/mitmproxy,gzzhanghao/mitmproxy,StevenVanAcker/mitmproxy,dwfreed/mitmproxy,vhaupert/mitmproxy,dufferzafar/mitmproxy,vhaupert/mitmproxy,cortesi/mitmproxy,jvillacorta/mitmproxy,cortesi/mitmproxy,ikoz/mitmproxy,ikoz/mitmproxy,Kriechi/mitmproxy,mhils/mitmproxy
|
Add some basic tests for flow_exports
|
import netlib.tutils
from libmproxy import flow_export
from . import tutils
req_get = netlib.tutils.treq(
method='GET',
headers=None,
content=None,
)
req_post = netlib.tutils.treq(
method='POST',
headers=None,
)
def test_request_simple():
flow = tutils.tflow(req=req_get)
assert flow_export.curl_command(flow)
flow = tutils.tflow(req=req_post)
assert flow_export.curl_command(flow)
|
<commit_before><commit_msg>Add some basic tests for flow_exports<commit_after>
|
import netlib.tutils
from libmproxy import flow_export
from . import tutils
req_get = netlib.tutils.treq(
method='GET',
headers=None,
content=None,
)
req_post = netlib.tutils.treq(
method='POST',
headers=None,
)
def test_request_simple():
flow = tutils.tflow(req=req_get)
assert flow_export.curl_command(flow)
flow = tutils.tflow(req=req_post)
assert flow_export.curl_command(flow)
|
Add some basic tests for flow_exports
import netlib.tutils
from libmproxy import flow_export
from . import tutils
req_get = netlib.tutils.treq(
method='GET',
headers=None,
content=None,
)
req_post = netlib.tutils.treq(
method='POST',
headers=None,
)
def test_request_simple():
flow = tutils.tflow(req=req_get)
assert flow_export.curl_command(flow)
flow = tutils.tflow(req=req_post)
assert flow_export.curl_command(flow)
|
<commit_before><commit_msg>Add some basic tests for flow_exports<commit_after>import netlib.tutils
from libmproxy import flow_export
from . import tutils
req_get = netlib.tutils.treq(
method='GET',
headers=None,
content=None,
)
req_post = netlib.tutils.treq(
method='POST',
headers=None,
)
def test_request_simple():
flow = tutils.tflow(req=req_get)
assert flow_export.curl_command(flow)
flow = tutils.tflow(req=req_post)
assert flow_export.curl_command(flow)
|
|
5c0ef34788202abefbc36f80899f9b9b54ba17be
|
fabfile.py
|
fabfile.py
|
# -*- coding: utf-8 -*-
"""
Simple fabric file to test oinspect output
"""
from __future__ import print_function
import webbrowser
import oinspect.sphinxify as oi
def test_basic():
"""Test with an empty context"""
docstring = 'A test'
content = oi.sphinxify(docstring, oi.generate_context())
page_name = '/tmp/test_basic.html'
with open(page_name, 'w') as f:
f.write(content)
webbrowser.open_new_tab(page_name)
def run_all():
"""Run all tests"""
test_basic()
|
Add a fabric file to test the generated output
|
Add a fabric file to test the generated output
|
Python
|
bsd-3-clause
|
spyder-ide/docrepr,spyder-ide/docrepr,techtonik/docrepr,spyder-ide/docrepr,techtonik/docrepr,techtonik/docrepr
|
Add a fabric file to test the generated output
|
# -*- coding: utf-8 -*-
"""
Simple fabric file to test oinspect output
"""
from __future__ import print_function
import webbrowser
import oinspect.sphinxify as oi
def test_basic():
"""Test with an empty context"""
docstring = 'A test'
content = oi.sphinxify(docstring, oi.generate_context())
page_name = '/tmp/test_basic.html'
with open(page_name, 'w') as f:
f.write(content)
webbrowser.open_new_tab(page_name)
def run_all():
"""Run all tests"""
test_basic()
|
<commit_before><commit_msg>Add a fabric file to test the generated output<commit_after>
|
# -*- coding: utf-8 -*-
"""
Simple fabric file to test oinspect output
"""
from __future__ import print_function
import webbrowser
import oinspect.sphinxify as oi
def test_basic():
"""Test with an empty context"""
docstring = 'A test'
content = oi.sphinxify(docstring, oi.generate_context())
page_name = '/tmp/test_basic.html'
with open(page_name, 'w') as f:
f.write(content)
webbrowser.open_new_tab(page_name)
def run_all():
"""Run all tests"""
test_basic()
|
Add a fabric file to test the generated output
# -*- coding: utf-8 -*-
"""
Simple fabric file to test oinspect output
"""
from __future__ import print_function
import webbrowser
import oinspect.sphinxify as oi
def test_basic():
"""Test with an empty context"""
docstring = 'A test'
content = oi.sphinxify(docstring, oi.generate_context())
page_name = '/tmp/test_basic.html'
with open(page_name, 'w') as f:
f.write(content)
webbrowser.open_new_tab(page_name)
def run_all():
"""Run all tests"""
test_basic()
|
<commit_before><commit_msg>Add a fabric file to test the generated output<commit_after># -*- coding: utf-8 -*-
"""
Simple fabric file to test oinspect output
"""
from __future__ import print_function
import webbrowser
import oinspect.sphinxify as oi
def test_basic():
"""Test with an empty context"""
docstring = 'A test'
content = oi.sphinxify(docstring, oi.generate_context())
page_name = '/tmp/test_basic.html'
with open(page_name, 'w') as f:
f.write(content)
webbrowser.open_new_tab(page_name)
def run_all():
"""Run all tests"""
test_basic()
|
|
c5da3ee962a05c05d55fd98149c1095a57f03e36
|
test/shots/test_task_types_for_shot.py
|
test/shots/test_task_types_for_shot.py
|
from test.base import ApiDBTestCase
class ShotTaskTypesTestCase(ApiDBTestCase):
def setUp(self):
super(ShotTaskTypesTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_entity_type()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_entity()
self.generate_fixture_person()
self.generate_fixture_assigner()
self.generate_fixture_task_status()
self.generate_fixture_department()
self.generate_fixture_task_type()
self.generate_fixture_shot_task()
def test_get_task_types_for_shot(self):
task_types = self.get("/data/shots/%s/task-types" % self.shot.id)
self.assertEquals(len(task_types), 1)
self.assertDictEqual(
task_types[0],
self.task_type_animation.serialize()
)
|
Add tests for task types for shot route
|
Add tests for task types for shot route
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add tests for task types for shot route
|
from test.base import ApiDBTestCase
class ShotTaskTypesTestCase(ApiDBTestCase):
def setUp(self):
super(ShotTaskTypesTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_entity_type()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_entity()
self.generate_fixture_person()
self.generate_fixture_assigner()
self.generate_fixture_task_status()
self.generate_fixture_department()
self.generate_fixture_task_type()
self.generate_fixture_shot_task()
def test_get_task_types_for_shot(self):
task_types = self.get("/data/shots/%s/task-types" % self.shot.id)
self.assertEquals(len(task_types), 1)
self.assertDictEqual(
task_types[0],
self.task_type_animation.serialize()
)
|
<commit_before><commit_msg>Add tests for task types for shot route<commit_after>
|
from test.base import ApiDBTestCase
class ShotTaskTypesTestCase(ApiDBTestCase):
def setUp(self):
super(ShotTaskTypesTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_entity_type()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_entity()
self.generate_fixture_person()
self.generate_fixture_assigner()
self.generate_fixture_task_status()
self.generate_fixture_department()
self.generate_fixture_task_type()
self.generate_fixture_shot_task()
def test_get_task_types_for_shot(self):
task_types = self.get("/data/shots/%s/task-types" % self.shot.id)
self.assertEquals(len(task_types), 1)
self.assertDictEqual(
task_types[0],
self.task_type_animation.serialize()
)
|
Add tests for task types for shot routefrom test.base import ApiDBTestCase
class ShotTaskTypesTestCase(ApiDBTestCase):
def setUp(self):
super(ShotTaskTypesTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_entity_type()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_entity()
self.generate_fixture_person()
self.generate_fixture_assigner()
self.generate_fixture_task_status()
self.generate_fixture_department()
self.generate_fixture_task_type()
self.generate_fixture_shot_task()
def test_get_task_types_for_shot(self):
task_types = self.get("/data/shots/%s/task-types" % self.shot.id)
self.assertEquals(len(task_types), 1)
self.assertDictEqual(
task_types[0],
self.task_type_animation.serialize()
)
|
<commit_before><commit_msg>Add tests for task types for shot route<commit_after>from test.base import ApiDBTestCase
class ShotTaskTypesTestCase(ApiDBTestCase):
def setUp(self):
super(ShotTaskTypesTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_entity_type()
self.generate_fixture_sequence()
self.generate_fixture_shot()
self.generate_fixture_entity()
self.generate_fixture_person()
self.generate_fixture_assigner()
self.generate_fixture_task_status()
self.generate_fixture_department()
self.generate_fixture_task_type()
self.generate_fixture_shot_task()
def test_get_task_types_for_shot(self):
task_types = self.get("/data/shots/%s/task-types" % self.shot.id)
self.assertEquals(len(task_types), 1)
self.assertDictEqual(
task_types[0],
self.task_type_animation.serialize()
)
|
|
0942d2ccf68b88db2616f9839c1ca1ebfacb8ad9
|
migration/versions/013_dataset_serp.py
|
migration/versions/013_dataset_serp.py
|
from sqlalchemy import *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dataset = Table('dataset', meta, autoload=True)
serp_title = Column('serp_title', Unicode())
serp_title.create(dataset)
serp_teaser = Column('serp_teaser', Unicode())
serp_teaser.create(dataset)
|
Migrate in domain model changes
|
Migrate in domain model changes
|
Python
|
agpl-3.0
|
johnjohndoe/spendb,CivicVision/datahub,USStateDept/FPA_Core,USStateDept/FPA_Core,spendb/spendb,openspending/spendb,pudo/spendb,johnjohndoe/spendb,nathanhilbert/FPA_Core,spendb/spendb,nathanhilbert/FPA_Core,CivicVision/datahub,spendb/spendb,nathanhilbert/FPA_Core,CivicVision/datahub,openspending/spendb,openspending/spendb,pudo/spendb,pudo/spendb,USStateDept/FPA_Core,johnjohndoe/spendb
|
Migrate in domain model changes
|
from sqlalchemy import *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dataset = Table('dataset', meta, autoload=True)
serp_title = Column('serp_title', Unicode())
serp_title.create(dataset)
serp_teaser = Column('serp_teaser', Unicode())
serp_teaser.create(dataset)
|
<commit_before><commit_msg>Migrate in domain model changes <commit_after>
|
from sqlalchemy import *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dataset = Table('dataset', meta, autoload=True)
serp_title = Column('serp_title', Unicode())
serp_title.create(dataset)
serp_teaser = Column('serp_teaser', Unicode())
serp_teaser.create(dataset)
|
Migrate in domain model changes from sqlalchemy import *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dataset = Table('dataset', meta, autoload=True)
serp_title = Column('serp_title', Unicode())
serp_title.create(dataset)
serp_teaser = Column('serp_teaser', Unicode())
serp_teaser.create(dataset)
|
<commit_before><commit_msg>Migrate in domain model changes <commit_after>from sqlalchemy import *
from migrate import *
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dataset = Table('dataset', meta, autoload=True)
serp_title = Column('serp_title', Unicode())
serp_title.create(dataset)
serp_teaser = Column('serp_teaser', Unicode())
serp_teaser.create(dataset)
|
|
51faed84f4d56fe3455a6568bdadbc9b16196175
|
day5-1.py
|
day5-1.py
|
"""Module to find the passowrd on a bunny door."""
import hashlib
def main():
"""Run the main function."""
id = 'cxdnnyjw'
password = []
begin = '00000'
index = 0
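# Brute force: each MD5 digest that starts with five zeros contributes its sixth hex digit to the password.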
while len(password) < 8:
test = id + str(index)
if begin == hashlib.md5(test).hexdigest()[0:5]:
password.append(hashlib.md5(test).hexdigest()[5])
index += 1
print('The password is {}'.format(''.join(password)))
if __name__ == '__main__':
main()
|
Add day 5 part 1.
|
Add day 5 part 1.
|
Python
|
mit
|
SayWhat1/adventofcode2016
|
Add day 5 part 1.
|
"""Module to find the passowrd on a bunny door."""
import hashlib
def main():
"""Run the main function."""
id = 'cxdnnyjw'
password = []
begin = '00000'
index = 0
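# Brute force: each MD5 digest that starts with five zeros contributes its sixth hex digit to the password.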
while len(password) < 8:
test = id + str(index)
if begin == hashlib.md5(test).hexdigest()[0:5]:
password.append(hashlib.md5(test).hexdigest()[5])
index += 1
print('The password is {}'.format(''.join(password)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add day 5 part 1.<commit_after>
|
"""Module to find the passowrd on a bunny door."""
import hashlib
def main():
"""Run the main function."""
id = 'cxdnnyjw'
password = []
begin = '00000'
index = 0
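# Brute force: each MD5 digest that starts with five zeros contributes its sixth hex digit to the password.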
while len(password) < 8:
test = id + str(index)
if begin == hashlib.md5(test).hexdigest()[0:5]:
password.append(hashlib.md5(test).hexdigest()[5])
index += 1
print('The password is {}'.format(''.join(password)))
if __name__ == '__main__':
main()
|
Add day 5 part 1."""Module to find the password on a bunny door."""
import hashlib
def main():
"""Run the main function."""
id = 'cxdnnyjw'
password = []
begin = '00000'
index = 0
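# Brute force: each MD5 digest that starts with five zeros contributes its sixth hex digit to the password.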
while len(password) < 8:
test = id + str(index)
if begin == hashlib.md5(test).hexdigest()[0:5]:
password.append(hashlib.md5(test).hexdigest()[5])
index += 1
print('The password is {}'.format(''.join(password)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add day 5 part 1.<commit_after>"""Module to find the password on a bunny door."""
import hashlib
def main():
"""Run the main function."""
id = 'cxdnnyjw'
password = []
begin = '00000'
index = 0
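# Brute force: each MD5 digest that starts with five zeros contributes its sixth hex digit to the password.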
while len(password) < 8:
test = id + str(index)
if begin == hashlib.md5(test).hexdigest()[0:5]:
password.append(hashlib.md5(test).hexdigest()[5])
index += 1
print('The password is {}'.format(''.join(password)))
if __name__ == '__main__':
main()
|
|
76ccb3e14da170000c8071203e931eeb8bc7c642
|
tests/test_deepcopy.py
|
tests/test_deepcopy.py
|
from tests.models import (
Cat,
Location,
)
import copy
from rest_framework.test import APITestCase
class DeepcopyTestCase(APITestCase):
def test_cat(self):
home = Location(name='Home', blob='ILUVU')
papa = Cat(name='Papa')
kitkat = Cat(name='KitKat', home=home, parent=papa)
kitkat_clone = copy.deepcopy(kitkat)
self.assertEquals(kitkat.name, kitkat_clone.name)
self.assertEquals(kitkat.home.name, kitkat_clone.home.name)
self.assertEquals(kitkat.parent.name, kitkat_clone.parent.name)
|
Add a test case for deepcopy
|
Add a test case for deepcopy
|
Python
|
mit
|
AltSchool/dynamic-rest-client
|
Add a test case for deepcopy
|
from tests.models import (
Cat,
Location,
)
import copy
from rest_framework.test import APITestCase
class DeepcopyTestCase(APITestCase):
def test_cat(self):
home = Location(name='Home', blob='ILUVU')
papa = Cat(name='Papa')
kitkat = Cat(name='KitKat', home=home, parent=papa)
kitkat_clone = copy.deepcopy(kitkat)
self.assertEquals(kitkat.name, kitkat_clone.name)
self.assertEquals(kitkat.home.name, kitkat_clone.home.name)
self.assertEquals(kitkat.parent.name, kitkat_clone.parent.name)
|
<commit_before><commit_msg>Add a test case for deepcopy<commit_after>
|
from tests.models import (
Cat,
Location,
)
import copy
from rest_framework.test import APITestCase
class DeepcopyTestCase(APITestCase):
def test_cat(self):
home = Location(name='Home', blob='ILUVU')
papa = Cat(name='Papa')
kitkat = Cat(name='KitKat', home=home, parent=papa)
kitkat_clone = copy.deepcopy(kitkat)
self.assertEquals(kitkat.name, kitkat_clone.name)
self.assertEquals(kitkat.home.name, kitkat_clone.home.name)
self.assertEquals(kitkat.parent.name, kitkat_clone.parent.name)
|
Add a test case for deepcopyfrom tests.models import (
Cat,
Location,
)
import copy
from rest_framework.test import APITestCase
class DeepcopyTestCase(APITestCase):
def test_cat(self):
home = Location(name='Home', blob='ILUVU')
papa = Cat(name='Papa')
kitkat = Cat(name='KitKat', home=home, parent=papa)
kitkat_clone = copy.deepcopy(kitkat)
self.assertEquals(kitkat.name, kitkat_clone.name)
self.assertEquals(kitkat.home.name, kitkat_clone.home.name)
self.assertEquals(kitkat.parent.name, kitkat_clone.parent.name)
|
<commit_before><commit_msg>Add a test case for deepcopy<commit_after>from tests.models import (
Cat,
Location,
)
import copy
from rest_framework.test import APITestCase
class DeepcopyTestCase(APITestCase):
def test_cat(self):
home = Location(name='Home', blob='ILUVU')
papa = Cat(name='Papa')
kitkat = Cat(name='KitKat', home=home, parent=papa)
kitkat_clone = copy.deepcopy(kitkat)
self.assertEquals(kitkat.name, kitkat_clone.name)
self.assertEquals(kitkat.home.name, kitkat_clone.home.name)
self.assertEquals(kitkat.parent.name, kitkat_clone.parent.name)
|
|
47d770c6008116dd72c6c6b4572a0a92faa39e66
|
test/test_update.py
|
test/test_update.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
basedir = os.path.realpath('..')
if basedir not in sys.path:
sys.path.append(basedir)
import update as up
# logging
LOGFORMAT_STDOUT = {
logging.DEBUG: '%(module)s:%(funcName)s:%(lineno)s - '
'%(levelname)-8s: %(message)s',
logging.INFO: '%(levelname)-8s: %(message)s',
logging.WARNING: '%(levelname)-8s: %(message)s',
logging.ERROR: '%(levelname)-8s: %(message)s',
logging.CRITICAL: '%(levelname)-8s: %(message)s'}
# --- root logger
rootlogger = logging.getLogger('sbnredirect')
rootlogger.setLevel(logging.DEBUG)
lvl_config_logger = logging.DEBUG
console = logging.StreamHandler()
console.setLevel(lvl_config_logger)
formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_config_logger])
console.setFormatter(formatter)
rootlogger.addHandler(console)
if __name__ == '__main__':
CONFIG_FILENAME = 'update.cfg'
config_file = os.path.realpath(os.path.join('..', CONFIG_FILENAME))
rootlogger.debug(config_file)
up.read_config(config_file)
|
Add test file for update.py
|
Add test file for update.py
|
Python
|
mit
|
CristianCantoro/sbntoolkit,CristianCantoro/sbntoolkit
|
Add test file for update.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
basedir = os.path.realpath('..')
if basedir not in sys.path:
sys.path.append(basedir)
import update as up
# logging
LOGFORMAT_STDOUT = {
logging.DEBUG: '%(module)s:%(funcName)s:%(lineno)s - '
'%(levelname)-8s: %(message)s',
logging.INFO: '%(levelname)-8s: %(message)s',
logging.WARNING: '%(levelname)-8s: %(message)s',
logging.ERROR: '%(levelname)-8s: %(message)s',
logging.CRITICAL: '%(levelname)-8s: %(message)s'}
# --- root logger
rootlogger = logging.getLogger('sbnredirect')
rootlogger.setLevel(logging.DEBUG)
lvl_config_logger = logging.DEBUG
console = logging.StreamHandler()
console.setLevel(lvl_config_logger)
formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_config_logger])
console.setFormatter(formatter)
rootlogger.addHandler(console)
if __name__ == '__main__':
CONFIG_FILENAME = 'update.cfg'
config_file = os.path.realpath(os.path.join('..', CONFIG_FILENAME))
rootlogger.debug(config_file)
up.read_config(config_file)
|
<commit_before><commit_msg>Add test file for update.py<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
basedir = os.path.realpath('..')
if basedir not in sys.path:
sys.path.append(basedir)
import update as up
# logging
LOGFORMAT_STDOUT = {
logging.DEBUG: '%(module)s:%(funcName)s:%(lineno)s - '
'%(levelname)-8s: %(message)s',
logging.INFO: '%(levelname)-8s: %(message)s',
logging.WARNING: '%(levelname)-8s: %(message)s',
logging.ERROR: '%(levelname)-8s: %(message)s',
logging.CRITICAL: '%(levelname)-8s: %(message)s'}
# --- root logger
rootlogger = logging.getLogger('sbnredirect')
rootlogger.setLevel(logging.DEBUG)
lvl_config_logger = logging.DEBUG
console = logging.StreamHandler()
console.setLevel(lvl_config_logger)
formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_config_logger])
console.setFormatter(formatter)
rootlogger.addHandler(console)
if __name__ == '__main__':
CONFIG_FILENAME = 'update.cfg'
config_file = os.path.realpath(os.path.join('..', CONFIG_FILENAME))
rootlogger.debug(config_file)
up.read_config(config_file)
|
Add test file for update.py#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
basedir = os.path.realpath('..')
if basedir not in sys.path:
sys.path.append(basedir)
import update as up
# logging
LOGFORMAT_STDOUT = {
logging.DEBUG: '%(module)s:%(funcName)s:%(lineno)s - '
'%(levelname)-8s: %(message)s',
logging.INFO: '%(levelname)-8s: %(message)s',
logging.WARNING: '%(levelname)-8s: %(message)s',
logging.ERROR: '%(levelname)-8s: %(message)s',
logging.CRITICAL: '%(levelname)-8s: %(message)s'}
# --- root logger
rootlogger = logging.getLogger('sbnredirect')
rootlogger.setLevel(logging.DEBUG)
lvl_config_logger = logging.DEBUG
console = logging.StreamHandler()
console.setLevel(lvl_config_logger)
formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_config_logger])
console.setFormatter(formatter)
rootlogger.addHandler(console)
if __name__ == '__main__':
CONFIG_FILENAME = 'update.cfg'
config_file = os.path.realpath(os.path.join('..', CONFIG_FILENAME))
rootlogger.debug(config_file)
up.read_config(config_file)
|
<commit_before><commit_msg>Add test file for update.py<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
basedir = os.path.realpath('..')
if basedir not in sys.path:
sys.path.append(basedir)
import update as up
# logging
LOGFORMAT_STDOUT = {
logging.DEBUG: '%(module)s:%(funcName)s:%(lineno)s - '
'%(levelname)-8s: %(message)s',
logging.INFO: '%(levelname)-8s: %(message)s',
logging.WARNING: '%(levelname)-8s: %(message)s',
logging.ERROR: '%(levelname)-8s: %(message)s',
logging.CRITICAL: '%(levelname)-8s: %(message)s'}
# --- root logger
rootlogger = logging.getLogger('sbnredirect')
rootlogger.setLevel(logging.DEBUG)
lvl_config_logger = logging.DEBUG
console = logging.StreamHandler()
console.setLevel(lvl_config_logger)
formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_config_logger])
console.setFormatter(formatter)
rootlogger.addHandler(console)
if __name__ == '__main__':
CONFIG_FILENAME = 'update.cfg'
config_file = os.path.realpath(os.path.join('..', CONFIG_FILENAME))
rootlogger.debug(config_file)
up.read_config(config_file)
|
|
97b933815dcbc179e25bc9c1c16cfa1153036ae1
|
performance_tests/epsilon_convolution.py
|
performance_tests/epsilon_convolution.py
|
#!/usr/bin/python3
'''
Convolution
'''
from __future__ import print_function
import numpy as np
import cProfile
import random
import matplotlib.pyplot as plt
def eps(s, t_membran):
return np.exp(-s / t_membran)
def small_spiketrain():
# 1000 timesteps
# With 10 random spikes
s = np.array([0]*1000)
for i in range(10):
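# randint is inclusive on both ends, so the last valid index is len(s) - 1.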
index = random.randint(0, 999)
s[index] = 1
return s
def big_spiketrain():
# 1.000.000 timesteps Spiketrain
# With 10.000 random spikes
s = np.array([0]*1000000)
for i in range(10000):
index = random.randint(0, 999999)
s[index] = 1
return s
if __name__ == "__main__":
t_current = 0.3
t_membran = 20
# Epsilon Function as a vector
x = np.linspace(0, 200, 200)
epsilon_vector = eps(x, t_membran)
# Spiketrain
s = big_spiketrain()
# Convolute
s = (np.convolve(s, epsilon_vector, 'same'))
cProfile.run('np.convolve(s, epsilon_vector, "same")')
plt.plot(s, label='Convoluted Spiketrain')
plt.plot(x, epsilon_vector, label='epsilon vector')
plt.legend()
plt.show()
|
Add performance test for epsilon convolution
|
Add performance test for epsilon convolution
|
Python
|
bsd-2-clause
|
timqian/neurons,johannesmik/neurons
|
Add performance test for epsilon convolution
|
#!/usr/bin/python3
'''
Convolution
'''
from __future__ import print_function
import numpy as np
import cProfile
import random
import matplotlib.pyplot as plt
def eps(s, t_membran):
return np.exp(-s / t_membran)
def small_spiketrain():
# 1000 timesteps
# With 10 random spikes
s = np.array([0]*1000)
for i in range(10):
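# randint is inclusive on both ends, so the last valid index is len(s) - 1.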
index = random.randint(0, 999)
s[index] = 1
return s
def big_spiketrain():
# 1.000.000 timesteps Spiketrain
# With 10.000 random spikes
s = np.array([0]*1000000)
for i in range(10000):
index = random.randint(0, 999999)
s[index] = 1
return s
if __name__ == "__main__":
t_current = 0.3
t_membran = 20
# Epsilon Function as a vector
x = np.linspace(0, 200, 200)
epsilon_vector = eps(x, t_membran)
# Spiketrain
s = big_spiketrain()
# Convolute
s = (np.convolve(s, epsilon_vector, 'same'))
cProfile.run('np.convolve(s, epsilon_vector, "same")')
plt.plot(s, label='Convoluted Spiketrain')
plt.plot(x, epsilon_vector, label='epsilon vector')
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add performance test for epsilon convolution<commit_after>
|
#!/usr/bin/python3
'''
Convolution
'''
from __future__ import print_function
import numpy as np
import cProfile
import random
import matplotlib.pyplot as plt
def eps(s, t_membran):
return np.exp(-s / t_membran)
def small_spiketrain():
# 1000 timesteps
# With 10 random spikes
s = np.array([0]*1000)
for i in range(10):
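# randint is inclusive on both ends, so the last valid index is len(s) - 1.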
index = random.randint(0, 999)
s[index] = 1
return s
def big_spiketrain():
# 1.000.000 timesteps Spiketrain
# With 10.000 random spikes
s = np.array([0]*1000000)
for i in range(10000):
index = random.randint(0, 999999)
s[index] = 1
return s
if __name__ == "__main__":
t_current = 0.3
t_membran = 20
# Epsilon Function as a vector
x = np.linspace(0, 200, 200)
epsilon_vector = eps(x, t_membran)
# Spiketrain
s = big_spiketrain()
# Convolute
s = (np.convolve(s, epsilon_vector, 'same'))
cProfile.run('np.convolve(s, epsilon_vector, "same")')
plt.plot(s, label='Convoluted Spiketrain')
plt.plot(x, epsilon_vector, label='epsilon vector')
plt.legend()
plt.show()
|
Add performance test for epsilon convolution#!/usr/bin/python3
'''
Convolution
'''
from __future__ import print_function
import numpy as np
import cProfile
import random
import matplotlib.pyplot as plt
def eps(s, t_membran):
return np.exp(-s / t_membran)
def small_spiketrain():
# 1000 timesteps
# With 10 random spikes
s = np.array([0]*1000)
for i in range(10):
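# randint is inclusive on both ends, so the last valid index is len(s) - 1.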
index = random.randint(0, 999)
s[index] = 1
return s
def big_spiketrain():
# 1.000.000 timesteps Spiketrain
# With 10.000 random spikes
s = np.array([0]*1000000)
for i in range(10000):
index = random.randint(0, 999999)
s[index] = 1
return s
if __name__ == "__main__":
t_current = 0.3
t_membran = 20
# Epsilon Function as a vector
x = np.linspace(0, 200, 200)
epsilon_vector = eps(x, t_membran)
# Spiketrain
s = big_spiketrain()
# Convolute
s = (np.convolve(s, epsilon_vector, 'same'))
cProfile.run('np.convolve(s, epsilon_vector, "same")')
plt.plot(s, label='Convoluted Spiketrain')
plt.plot(x, epsilon_vector, label='epsilon vector')
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add performance test for epsilon convolution<commit_after>#!/usr/bin/python3
'''
Convolution
'''
from __future__ import print_function
import numpy as np
import cProfile
import random
import matplotlib.pyplot as plt
def eps(s, t_membran):
return np.exp(-s / t_membran)
def small_spiketrain():
# 1000 timesteps
# With 10 random spikes
s = np.array([0]*1000)
for i in range(10):
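# randint is inclusive on both ends, so the last valid index is len(s) - 1.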
index = random.randint(0, 999)
s[index] = 1
return s
def big_spiketrain():
# 1.000.000 timesteps Spiketrain
# With 10.000 random spikes
s = np.array([0]*1000000)
for i in range(10000):
index = random.randint(0, 999999)
s[index] = 1
return s
if __name__ == "__main__":
t_current = 0.3
t_membran = 20
# Epsilon Function as a vector
x = np.linspace(0, 200, 200)
epsilon_vector = eps(x, t_membran)
# Spiketrain
s = big_spiketrain()
# Convolute
s = (np.convolve(s, epsilon_vector, 'same'))
cProfile.run('np.convolve(s, epsilon_vector, "same")')
plt.plot(s, label='Convoluted Spiketrain')
plt.plot(x, epsilon_vector, label='epsilon vector')
plt.legend()
plt.show()
|
|
2e7252fab4667047c04b540040d5ad2287a73299
|
parrainage/app/management/commands/import_geoloc.py
|
parrainage/app/management/commands/import_geoloc.py
|
# Copyright 2017 Raphaël Hertzog
#
# This file is subject to the license terms in the LICENSE file found in
# the top-level directory of this distribution.
import argparse
from datetime import datetime
import csv
import logging
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from parrainage.app.models import Elu
class Command(BaseCommand):
help = 'Import a CSV file with data about mayors'
def add_arguments(self, parser):
parser.add_argument('csvfile', help='Path of the CSV file',
type=argparse.FileType(mode='r', encoding='utf-8'))
@transaction.atomic
def handle(self, *args, **kwargs):
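# Rows longer than the header row get their extra columns collected under the 'addresses' restkey.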
csvfile = csv.DictReader(kwargs['csvfile'], delimiter=';',
restkey='addresses')
for row in csvfile:
done = False
for elu in Elu.objects.filter(city_code=row['city_code']):
elu.city_address = '\n'.join(row.get('addresses', [])) or ''
elu.city_zipcode = row['city_zipcode'] or ''
elu.city_latitude = row['latitude'] or ''
elu.city_longitude = row['longitude'] or ''
elu.save()
done = True
if not done:
sys.stderr.write(
'Unknown city code: {}\n'.format(row['city_code']))
|
Add management command to import geolocation data
|
Add management command to import geolocation data
|
Python
|
mit
|
rhertzog/parrainage,rhertzog/parrainage,rhertzog/parrainage
|
Add management command to import geolocation data
|
# Copyright 2017 Raphaël Hertzog
#
# This file is subject to the license terms in the LICENSE file found in
# the top-level directory of this distribution.
import argparse
from datetime import datetime
import csv
import logging
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from parrainage.app.models import Elu
class Command(BaseCommand):
help = 'Import a CSV file with data about mayors'
def add_arguments(self, parser):
parser.add_argument('csvfile', help='Path of the CSV file',
type=argparse.FileType(mode='r', encoding='utf-8'))
@transaction.atomic
def handle(self, *args, **kwargs):
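# Rows longer than the header row get their extra columns collected under the 'addresses' restkey.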
csvfile = csv.DictReader(kwargs['csvfile'], delimiter=';',
restkey='addresses')
for row in csvfile:
done = False
for elu in Elu.objects.filter(city_code=row['city_code']):
elu.city_address = '\n'.join(row.get('addresses', [])) or ''
elu.city_zipcode = row['city_zipcode'] or ''
elu.city_latitude = row['latitude'] or ''
elu.city_longitude = row['longitude'] or ''
elu.save()
done = True
if not done:
sys.stderr.write(
'Unknown city code: {}\n'.format(row['city_code']))
|
<commit_before><commit_msg>Add management command to import geolocation data<commit_after>
|
# Copyright 2017 Raphaël Hertzog
#
# This file is subject to the license terms in the LICENSE file found in
# the top-level directory of this distribution.
import argparse
from datetime import datetime
import csv
import logging
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from parrainage.app.models import Elu
class Command(BaseCommand):
help = 'Import a CSV file with data about mayors'
def add_arguments(self, parser):
parser.add_argument('csvfile', help='Path of the CSV file',
type=argparse.FileType(mode='r', encoding='utf-8'))
@transaction.atomic
def handle(self, *args, **kwargs):
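# Rows longer than the header row get their extra columns collected under the 'addresses' restkey.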
csvfile = csv.DictReader(kwargs['csvfile'], delimiter=';',
restkey='addresses')
for row in csvfile:
done = False
for elu in Elu.objects.filter(city_code=row['city_code']):
elu.city_address = '\n'.join(row.get('addresses', [])) or ''
elu.city_zipcode = row['city_zipcode'] or ''
elu.city_latitude = row['latitude'] or ''
elu.city_longitude = row['longitude'] or ''
elu.save()
done = True
if not done:
sys.stderr.write(
'Unknown city code: {}\n'.format(row['city_code']))
|
Add management command to import geolocation data# Copyright 2017 Raphaël Hertzog
#
# This file is subject to the license terms in the LICENSE file found in
# the top-level directory of this distribution.
import argparse
from datetime import datetime
import csv
import logging
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from parrainage.app.models import Elu
class Command(BaseCommand):
help = 'Import a CSV file with data about mayors'
def add_arguments(self, parser):
parser.add_argument('csvfile', help='Path of the CSV file',
type=argparse.FileType(mode='r', encoding='utf-8'))
@transaction.atomic
def handle(self, *args, **kwargs):
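# Rows longer than the header row get their extra columns collected under the 'addresses' restkey.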
csvfile = csv.DictReader(kwargs['csvfile'], delimiter=';',
restkey='addresses')
for row in csvfile:
done = False
for elu in Elu.objects.filter(city_code=row['city_code']):
elu.city_address = '\n'.join(row.get('addresses', [])) or ''
elu.city_zipcode = row['city_zipcode'] or ''
elu.city_latitude = row['latitude'] or ''
elu.city_longitude = row['longitude'] or ''
elu.save()
done = True
if not done:
sys.stderr.write(
'Unknown city code: {}\n'.format(row['city_code']))
|
<commit_before><commit_msg>Add management command to import geolocation data<commit_after># Copyright 2017 Raphaël Hertzog
#
# This file is subject to the license terms in the LICENSE file found in
# the top-level directory of this distribution.
import argparse
from datetime import datetime
import csv
import logging
import sys
from django.core.management.base import BaseCommand
from django.db import transaction
from parrainage.app.models import Elu
class Command(BaseCommand):
help = 'Import a CSV file with data about mayors'
def add_arguments(self, parser):
parser.add_argument('csvfile', help='Path of the CSV file',
type=argparse.FileType(mode='r', encoding='utf-8'))
@transaction.atomic
def handle(self, *args, **kwargs):
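# Rows longer than the header row get their extra columns collected under the 'addresses' restkey.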
csvfile = csv.DictReader(kwargs['csvfile'], delimiter=';',
restkey='addresses')
for row in csvfile:
done = False
for elu in Elu.objects.filter(city_code=row['city_code']):
elu.city_address = '\n'.join(row.get('addresses', [])) or ''
elu.city_zipcode = row['city_zipcode'] or ''
elu.city_latitude = row['latitude'] or ''
elu.city_longitude = row['longitude'] or ''
elu.save()
done = True
if not done:
sys.stderr.write(
'Unknown city code: {}\n'.format(row['city_code']))
|
|
be4abd8d3b54ab66f89c88e56cb948d5bf5f5725
|
stoneridge_info_gatherer.py
|
stoneridge_info_gatherer.py
|
#!/usr/bin/env python
try:
import configparser
except ImportError:
import ConfigParser as configparser
import json
import os
import platform
import stoneridge
class StoneRidgeInfoGatherer(object):
def run(self):
info_file = os.path.join(stoneridge.bindir, 'application.ini')
cp = configparser.SafeConfigParser()
cp.read([info_file])
build_info = {}
build_info['name'] = cp.get('App', 'Name')
build_info['version'] = cp.get('App', 'Version')
build_info['revision'] = cp.get('App', 'SourceStamp')
build_info['branch'] = ''
build_info['id'] = cp.get('App', 'BuildID')
machine_info = {}
machine_info['name'] = platform.node()
machine_info['os'] = stoneridge.os_name
machine_info['osversion'] = stoneridge.os_version
machine_info['platform'] = platform.machine()
info = {'test_machine':machine_info,
'test_build':build_info,
'testrun':{}}
with open(os.path.join(stoneridge.outdir, 'info.json'), 'w') as f:
json.dump(info, f)
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_arguments()
info_gatherer = StoneRidgeInfoGatherer()
info_gatherer.run()
|
Add the static info gatherer
|
Add the static info gatherer
|
Python
|
mpl-2.0
|
mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge
|
Add the static info gatherer
|
#!/usr/bin/env python
try:
import configparser
except ImportError:
import ConfigParser as configparser
import json
import os
import platform
import stoneridge
class StoneRidgeInfoGatherer(object):
def run(self):
info_file = os.path.join(stoneridge.bindir, 'application.ini')
cp = configparser.SafeConfigParser()
cp.read([info_file])
build_info = {}
build_info['name'] = cp.get('App', 'Name')
build_info['version'] = cp.get('App', 'Version')
build_info['revision'] = cp.get('App', 'SourceStamp')
build_info['branch'] = ''
build_info['id'] = cp.get('App', 'BuildID')
machine_info = {}
machine_info['name'] = platform.node()
machine_info['os'] = stoneridge.os_name
machine_info['osversion'] = stoneridge.os_version
machine_info['platform'] = platform.machine()
info = {'test_machine':machine_info,
'test_build':build_info,
'testrun':{}}
with open(os.path.join(stoneridge.outdir, 'info.json'), 'w') as f:
json.dump(info, f)
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_arguments()
info_gatherer = StoneRidgeInfoGatherer()
info_gatherer.run()
|
<commit_before><commit_msg>Add the static info gatherer<commit_after>
|
#!/usr/bin/env python
try:
import configparser
except ImportError:
import ConfigParser as configparser
import json
import os
import platform
import stoneridge
class StoneRidgeInfoGatherer(object):
def run(self):
info_file = os.path.join(stoneridge.bindir, 'application.ini')
cp = configparser.SafeConfigParser()
cp.read([info_file])
build_info = {}
build_info['name'] = cp.get('App', 'Name')
build_info['version'] = cp.get('App', 'Version')
build_info['revision'] = cp.get('App', 'SourceStamp')
build_info['branch'] = ''
build_info['id'] = cp.get('App', 'BuildID')
machine_info = {}
machine_info['name'] = platform.node()
machine_info['os'] = stoneridge.os_name
machine_info['osversion'] = stoneridge.os_version
machine_info['platform'] = platform.machine()
info = {'test_machine':machine_info,
'test_build':build_info,
'testrun':{}}
with open(os.path.join(stoneridge.outdir, 'info.json'), 'w') as f:
json.dump(info, f)
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_arguments()
info_gatherer = StoneRidgeInfoGatherer()
info_gatherer.run()
|
Add the static info gatherer#!/usr/bin/env python
try:
import configparser
except ImportError:
import ConfigParser as configparser
import json
import os
import platform
import stoneridge
class StoneRidgeInfoGatherer(object):
def run(self):
info_file = os.path.join(stoneridge.bindir, 'application.ini')
cp = configparser.SafeConfigParser()
cp.read([info_file])
build_info = {}
build_info['name'] = cp.get('App', 'Name')
build_info['version'] = cp.get('App', 'Version')
build_info['revision'] = cp.get('App', 'SourceStamp')
build_info['branch'] = ''
build_info['id'] = cp.get('App', 'BuildID')
machine_info = {}
machine_info['name'] = platform.node()
machine_info['os'] = stoneridge.os_name
machine_info['osversion'] = stoneridge.os_version
machine_info['platform'] = platform.machine()
info = {'test_machine':machine_info,
'test_build':build_info,
'testrun':{}}
with open(os.path.join(stoneridge.outdir, 'info.json'), 'w') as f:
json.dump(info, f)
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_arguments()
info_gatherer = StoneRidgeInfoGatherer()
info_gatherer.run()
|
<commit_before><commit_msg>Add the static info gatherer<commit_after>#!/usr/bin/env python
try:
import configparser
except ImportError:
import ConfigParser as configparser
import json
import os
import platform
import stoneridge
class StoneRidgeInfoGatherer(object):
def run(self):
info_file = os.path.join(stoneridge.bindir, 'application.ini')
cp = configparser.SafeConfigParser()
cp.read([info_file])
build_info = {}
build_info['name'] = cp.get('App', 'Name')
build_info['version'] = cp.get('App', 'Version')
build_info['revision'] = cp.get('App', 'SourceStamp')
build_info['branch'] = ''
build_info['id'] = cp.get('App', 'BuildID')
machine_info = {}
machine_info['name'] = platform.node()
machine_info['os'] = stoneridge.os_name
machine_info['osversion'] = stoneridge.os_version
machine_info['platform'] = platform.machine()
info = {'test_machine':machine_info,
'test_build':build_info,
'testrun':{}}
with open(os.path.join(stoneridge.outdir, 'info.json'), 'w') as f:
json.dump(info, f)
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_arguments()
info_gatherer = StoneRidgeInfoGatherer()
info_gatherer.run()
|
|
5a2394f8445350387adc30dd5bc818971aefc91d
|
lpthw/ex25.py
|
lpthw/ex25.py
|
def break_words(stuff):
"""This function will brea up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full senctence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of a sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words the prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
|
Add work for Exercise 25.
|
Add work for Exercise 25.
|
Python
|
mit
|
jaredmanning/learning,jaredmanning/learning
|
Add work for Exercise 25.
|
def break_words(stuff):
"""This function will brea up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full senctence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of a sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words the prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
|
<commit_before><commit_msg>Add work for Exercise 25.<commit_after>
|
def break_words(stuff):
"""This function will brea up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full senctence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of a sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words the prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
|
Add work for Exercise 25.def break_words(stuff):
"""This function will brea up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full senctence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of a sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words the prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
|
<commit_before><commit_msg>Add work for Exercise 25.<commit_after>def break_words(stuff):
"""This function will brea up words for us."""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts the words."""
return sorted(words)
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print word
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print word
def sort_sentence(sentence):
"""Takes in a full senctence and returns the sorted words."""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""Prints the first and last words of a sentence."""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
def print_first_and_last_sorted(sentence):
"""Sorts the words the prints the first and last one."""
words = sort_sentence(sentence)
print_first_word(words)
print_last_word(words)
|
|
bdd532cccf504dc9fbf21a9e72b8185dc910ec94
|
thezombies/management/commands/validate_all_data_catalogs.py
|
thezombies/management/commands/validate_all_data_catalogs.py
|
from django.core.management.base import NoArgsCommand
from thezombies.tasks.main import validate_data_catalogs
class Command(NoArgsCommand):
"""Validate all of the agency data catalogs"""
def handle_noargs(self, **options):
validator_group = validate_data_catalogs.delay()
self.stdout.write(u"\nSpawned data catalog task group: {0}\n".format(validator_group.id))
|
Add management command for running the task for validating all data catalogs.
|
Add management command for running the task for validating all data catalogs.
|
Python
|
bsd-3-clause
|
sunlightlabs/thezombies,sunlightlabs/thezombies,sunlightlabs/thezombies,sunlightlabs/thezombies
|
Add management command for running the task for validating all data catalogs.
|
from django.core.management.base import NoArgsCommand
from thezombies.tasks.main import validate_data_catalogs
class Command(NoArgsCommand):
"""Validate all of the agency data catalogs"""
def handle_noargs(self):
validator_group = validate_data_catalogs.delay()
self.stdout.write(u"\nSpawned data catalog task group: {0}\n".format(validator_group.id))
|
<commit_before><commit_msg>Add management command for running the task for validating all data catalogs.<commit_after>
|
from django.core.management.base import NoArgsCommand
from thezombies.tasks.main import validate_data_catalogs
class Command(NoArgsCommand):
"""Validate all of the agency data catalogs"""
def handle_noargs(self, **options):
validator_group = validate_data_catalogs.delay()
self.stdout.write(u"\nSpawned data catalog task group: {0}\n".format(validator_group.id))
|
Add management command for running the task for validating all data catalogs.from django.core.management.base import NoArgsCommand
from thezombies.tasks.main import validate_data_catalogs
class Command(NoArgsCommand):
"""Validate all of the agency data catalogs"""
def handle_noargs(self, **options):
validator_group = validate_data_catalogs.delay()
self.stdout.write(u"\nSpawned data catalog task group: {0}\n".format(validator_group.id))
|
<commit_before><commit_msg>Add management command for running the task for validating all data catalogs.<commit_after>from django.core.management.base import NoArgsCommand
from thezombies.tasks.main import validate_data_catalogs
class Command(NoArgsCommand):
"""Validate all of the agency data catalogs"""
def handle_noargs(self, **options):
validator_group = validate_data_catalogs.delay()
self.stdout.write(u"\nSpawned data catalog task group: {0}\n".format(validator_group.id))
|
|
24e14b7d53e43f1574971ff5b6eee6d0185df23a
|
rest_framework/tests/nested_relations.py
|
rest_framework/tests/nested_relations.py
|
from copy import deepcopy
from django.db import models
from django.test import TestCase
from rest_framework import serializers
# ForeignKey
class ForeignKeyTarget(models.Model):
name = models.CharField(max_length=100)
class ForeignKeySource(models.Model):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, related_name='sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
sources = ForeignKeySourceSerializer()
class Meta:
model = ForeignKeyTarget
class ReverseForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
self.target_data = {'id': 1, 'name': u'target-1', 'sources': [
{'id': 1, 'name': u'source-1', 'target': 1},
{'id': 2, 'name': u'source-2', 'target': 1},
{'id': 3, 'name': u'source-3', 'target': 1},
]}
self.new_target_data = {'id': 2, 'name': u'target-2', 'sources': []}
self.data = [self.target_data, self.new_target_data]
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
self.assertEquals(serializer.data, self.data)
def test_reverse_foreign_key_update(self):
data = deepcopy(self.target_data)
data['sources'][0]['name'] = 'source-1-changed'
data['sources'][2]['name'] = 'source-3-changed'
instance = ForeignKeyTarget.objects.get(pk=1)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEquals(serializer.data, data)
serializer.save()
# Ensure target 1 is updated, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
expected = deepcopy(self.data)
expected[0]['sources'][0]['name'] = 'source-1-changed'
expected[0]['sources'][2]['name'] = 'source-3-changed'
self.assertEquals(serializer.data, expected)
|
Add tests for retrieving/updating reverse fks
|
Add tests for retrieving/updating reverse fks
|
Python
|
bsd-2-clause
|
sehmaschine/django-rest-framework,qsorix/django-rest-framework,lubomir/django-rest-framework,nhorelik/django-rest-framework,James1345/django-rest-framework,canassa/django-rest-framework,yiyocx/django-rest-framework,thedrow/django-rest-framework-1,vstoykov/django-rest-framework,qsorix/django-rest-framework,jpulec/django-rest-framework,antonyc/django-rest-framework,kgeorgy/django-rest-framework,thedrow/django-rest-framework-1,rafaelang/django-rest-framework,damycra/django-rest-framework,HireAnEsquire/django-rest-framework,sbellem/django-rest-framework,rafaelcaricio/django-rest-framework,potpath/django-rest-framework,gregmuellegger/django-rest-framework,thedrow/django-rest-framework-1,waytai/django-rest-framework,akalipetis/django-rest-framework,uploadcare/django-rest-framework,ajaali/django-rest-framework,nhorelik/django-rest-framework,VishvajitP/django-rest-framework,wangpanjun/django-rest-framework,yiyocx/django-rest-framework,rhblind/django-rest-framework,andriy-s/django-rest-framework,aericson/django-rest-framework,rubendura/django-rest-framework,mgaitan/django-rest-framework,delinhabit/django-rest-framework,antonyc/django-rest-framework,rafaelcaricio/django-rest-framework,krinart/django-rest-framework,abdulhaq-e/django-rest-framework,raphaelmerx/django-rest-framework,waytai/django-rest-framework,douwevandermeij/django-rest-framework,sbellem/django-rest-framework,jness/django-rest-framework,VishvajitP/django-rest-framework,rhblind/django-rest-framework,nryoung/django-rest-framework,alacritythief/django-rest-framework,agconti/django-rest-framework,wedaly/django-rest-framework,nryoung/django-rest-framework,akalipetis/django-rest-framework,sehmaschine/django-rest-framework,sehmaschine/django-rest-framework,rubendura/django-rest-framework,wedaly/django-rest-framework,potpath/django-rest-framework,callorico/django-rest-framework,zeldalink0515/django-rest-framework,hnakamur/django-rest-framework,canassa/django-rest-framework,hunter007/django-rest-framework,zeldalink0515/django-rest-framework,bluedazzle/django-rest-framework,adambain-vokal/django-rest-framework,MJafarMashhadi/django-rest-framework,ebsaral/django-rest-framework,davesque/django-rest-framework,justanr/django-rest-framework,paolopaolopaolo/django-rest-framework,abdulhaq-e/django-rest-framework,ossanna16/django-rest-framework,hnakamur/django-rest-framework,pombredanne/django-rest-framework,lubomir/django-rest-framework,sheppard/django-rest-framework,fishky/django-rest-framework,wwj718/django-rest-framework,tomchristie/django-rest-framework,HireAnEsquire/django-rest-framework,cyberj/django-rest-framework,tcroiset/django-rest-framework,gregmuellegger/django-rest-framework,vstoykov/django-rest-framework,jerryhebert/django-rest-framework,simudream/django-rest-framework,pombredanne/django-rest-framework,kennydude/django-rest-framework,ebsaral/django-rest-framework,nhorelik/django-rest-framework,AlexandreProenca/django-rest-framework,buptlsl/django-rest-framework,pombredanne/django-rest-framework,ticosax/django-rest-framework,kezabelle/django-rest-framework,kylefox/django-rest-framework,jpulec/django-rest-framework,jtiai/django-rest-framework,leeahoward/django-rest-framework,zeldalink0515/django-rest-framework,maryokhin/django-rest-framework,dmwyatt/django-rest-framework,MJafarMashhadi/django-rest-framework,potpath/django-rest-framework,tomchristie/django-rest-framework,waytai/django-rest-framework,yiyocx/django-rest-framework,justanr/django-rest-framework,hnarayanan/django-rest-framework,lubomir/django-rest-framework,rubendura/django-rest
-framework,gregmuellegger/django-rest-framework,hnakamur/django-rest-framework,johnraz/django-rest-framework,werthen/django-rest-framework,callorico/django-rest-framework,leeahoward/django-rest-framework,werthen/django-rest-framework,bluedazzle/django-rest-framework,fishky/django-rest-framework,rafaelang/django-rest-framework,uploadcare/django-rest-framework,krinart/django-rest-framework,agconti/django-rest-framework,xiaotangyuan/django-rest-framework,delinhabit/django-rest-framework,YBJAY00000/django-rest-framework,abdulhaq-e/django-rest-framework,callorico/django-rest-framework,jerryhebert/django-rest-framework,adambain-vokal/django-rest-framework,alacritythief/django-rest-framework,wwj718/django-rest-framework,uruz/django-rest-framework,cyberj/django-rest-framework,James1345/django-rest-framework,maryokhin/django-rest-framework,paolopaolopaolo/django-rest-framework,simudream/django-rest-framework,tcroiset/django-rest-framework,justanr/django-rest-framework,kezabelle/django-rest-framework,mgaitan/django-rest-framework,YBJAY00000/django-rest-framework,edx/django-rest-framework,HireAnEsquire/django-rest-framework,alacritythief/django-rest-framework,linovia/django-rest-framework,hunter007/django-rest-framework,ambivalentno/django-rest-framework,jpadilla/django-rest-framework,hunter007/django-rest-framework,arpheno/django-rest-framework,wzbozon/django-rest-framework,rafaelang/django-rest-framework,ashishfinoit/django-rest-framework,ashishfinoit/django-rest-framework,jerryhebert/django-rest-framework,wzbozon/django-rest-framework,linovia/django-rest-framework,xiaotangyuan/django-rest-framework,ajaali/django-rest-framework,vstoykov/django-rest-framework,johnraz/django-rest-framework,atombrella/django-rest-framework,AlexandreProenca/django-rest-framework,jpulec/django-rest-framework,ticosax/django-rest-framework,tigeraniya/django-rest-framework,cheif/django-rest-framework,uruz/django-rest-framework,wedaly/django-rest-framework,kezabelle/django-rest-framework,ticosax/django-rest-framework,jtiai/django-rest-framework,elim/django-rest-framework,canassa/django-rest-framework,atombrella/django-rest-framework,maryokhin/django-rest-framework,tcroiset/django-rest-framework,sheppard/django-rest-framework,jpadilla/django-rest-framework,cheif/django-rest-framework,tigeraniya/django-rest-framework,tomchristie/django-rest-framework,andriy-s/django-rest-framework,hnarayanan/django-rest-framework,d0ugal/django-rest-framework,ebsaral/django-rest-framework,ezheidtmann/django-rest-framework,raphaelmerx/django-rest-framework,douwevandermeij/django-rest-framework,douwevandermeij/django-rest-framework,adambain-vokal/django-rest-framework,werthen/django-rest-framework,buptlsl/django-rest-framework,uruz/django-rest-framework,ezheidtmann/django-rest-framework,ossanna16/django-rest-framework,johnraz/django-rest-framework,d0ugal/django-rest-framework,delinhabit/django-rest-framework,buptlsl/django-rest-framework,davesque/django-rest-framework,edx/django-rest-framework,aericson/django-rest-framework,hnarayanan/django-rest-framework,James1345/django-rest-framework,tigeraniya/django-rest-framework,brandoncazander/django-rest-framework,sheppard/django-rest-framework,dmwyatt/django-rest-framework,elim/django-rest-framework,ambivalentno/django-rest-framework,raphaelmerx/django-rest-framework,kennydude/django-rest-framework,YBJAY00000/django-rest-framework,damycra/django-rest-framework,jness/django-rest-framework,agconti/django-rest-framework,uploadcare/django-rest-framework,paolopaolopaolo/django-rest-framework,kgeorgy/django-
rest-framework,atombrella/django-rest-framework,bluedazzle/django-rest-framework,rhblind/django-rest-framework,akalipetis/django-rest-framework,brandoncazander/django-rest-framework,kennydude/django-rest-framework,iheitlager/django-rest-framework,dmwyatt/django-rest-framework,ezheidtmann/django-rest-framework,iheitlager/django-rest-framework,d0ugal/django-rest-framework,linovia/django-rest-framework,wwj718/django-rest-framework,brandoncazander/django-rest-framework,aericson/django-rest-framework,krinart/django-rest-framework,edx/django-rest-framework,wangpanjun/django-rest-framework,cheif/django-rest-framework,wangpanjun/django-rest-framework,sbellem/django-rest-framework,ambivalentno/django-rest-framework,AlexandreProenca/django-rest-framework,davesque/django-rest-framework,nryoung/django-rest-framework,arpheno/django-rest-framework,kylefox/django-rest-framework,qsorix/django-rest-framework,kylefox/django-rest-framework,fishky/django-rest-framework,kgeorgy/django-rest-framework,leeahoward/django-rest-framework,arpheno/django-rest-framework,rafaelcaricio/django-rest-framework,xiaotangyuan/django-rest-framework,jtiai/django-rest-framework,iheitlager/django-rest-framework,antonyc/django-rest-framework,mgaitan/django-rest-framework,ashishfinoit/django-rest-framework,wzbozon/django-rest-framework,cyberj/django-rest-framework,ajaali/django-rest-framework,ossanna16/django-rest-framework,jness/django-rest-framework,andriy-s/django-rest-framework,damycra/django-rest-framework,MJafarMashhadi/django-rest-framework,jpadilla/django-rest-framework,simudream/django-rest-framework,elim/django-rest-framework,VishvajitP/django-rest-framework
|
Add tests for retrieving/updating reverse fks
|
from copy import deepcopy
from django.db import models
from django.test import TestCase
from rest_framework import serializers
# ForeignKey
class ForeignKeyTarget(models.Model):
name = models.CharField(max_length=100)
class ForeignKeySource(models.Model):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, related_name='sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
sources = ForeignKeySourceSerializer()
class Meta:
model = ForeignKeyTarget
class ReverseForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
self.target_data = {'id': 1, 'name': u'target-1', 'sources': [
{'id': 1, 'name': u'source-1', 'target': 1},
{'id': 2, 'name': u'source-2', 'target': 1},
{'id': 3, 'name': u'source-3', 'target': 1},
]}
self.new_target_data = {'id': 2, 'name': u'target-2', 'sources': []}
self.data = [self.target_data, self.new_target_data]
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
self.assertEquals(serializer.data, self.data)
def test_reverse_foreign_key_update(self):
data = deepcopy(self.target_data)
data['sources'][0]['name'] = 'source-1-changed'
data['sources'][2]['name'] = 'source-3-changed'
instance = ForeignKeyTarget.objects.get(pk=1)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEquals(serializer.data, data)
serializer.save()
# Ensure target 1 is updated, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
expected = deepcopy(self.data)
expected[0]['sources'][0]['name'] = 'source-1-changed'
expected[0]['sources'][2]['name'] = 'source-3-changed'
self.assertEquals(serializer.data, expected)
|
<commit_before><commit_msg>Add tests for retrieving/updating reverse fks<commit_after>
|
from copy import deepcopy
from django.db import models
from django.test import TestCase
from rest_framework import serializers
# ForeignKey
class ForeignKeyTarget(models.Model):
name = models.CharField(max_length=100)
class ForeignKeySource(models.Model):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, related_name='sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
sources = ForeignKeySourceSerializer()
class Meta:
model = ForeignKeyTarget
class ReverseForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
self.target_data = {'id': 1, 'name': u'target-1', 'sources': [
{'id': 1, 'name': u'source-1', 'target': 1},
{'id': 2, 'name': u'source-2', 'target': 1},
{'id': 3, 'name': u'source-3', 'target': 1},
]}
self.new_target_data = {'id': 2, 'name': u'target-2', 'sources': []}
self.data = [self.target_data, self.new_target_data]
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
self.assertEquals(serializer.data, self.data)
def test_reverse_foreign_key_update(self):
data = deepcopy(self.target_data)
data['sources'][0]['name'] = 'source-1-changed'
data['sources'][2]['name'] = 'source-3-changed'
instance = ForeignKeyTarget.objects.get(pk=1)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEquals(serializer.data, data)
serializer.save()
# Ensure target 1 is updated, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
expected = deepcopy(self.data)
expected[0]['sources'][0]['name'] = 'source-1-changed'
expected[0]['sources'][2]['name'] = 'source-3-changed'
self.assertEquals(serializer.data, expected)
|
Add tests for retrieving/updating reverse fksfrom copy import deepcopy
from django.db import models
from django.test import TestCase
from rest_framework import serializers
# ForeignKey
class ForeignKeyTarget(models.Model):
name = models.CharField(max_length=100)
class ForeignKeySource(models.Model):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, related_name='sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
sources = ForeignKeySourceSerializer()
class Meta:
model = ForeignKeyTarget
class ReverseForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
self.target_data = {'id': 1, 'name': u'target-1', 'sources': [
{'id': 1, 'name': u'source-1', 'target': 1},
{'id': 2, 'name': u'source-2', 'target': 1},
{'id': 3, 'name': u'source-3', 'target': 1},
]}
self.new_target_data = {'id': 2, 'name': u'target-2', 'sources': []}
self.data = [self.target_data, self.new_target_data]
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
self.assertEquals(serializer.data, self.data)
def test_reverse_foreign_key_update(self):
data = deepcopy(self.target_data)
data['sources'][0]['name'] = 'source-1-changed'
data['sources'][2]['name'] = 'source-3-changed'
instance = ForeignKeyTarget.objects.get(pk=1)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEquals(serializer.data, data)
serializer.save()
# Ensure target 1 is updated, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
expected = deepcopy(self.data)
expected[0]['sources'][0]['name'] = 'source-1-changed'
expected[0]['sources'][2]['name'] = 'source-3-changed'
self.assertEquals(serializer.data, expected)
|
<commit_before><commit_msg>Add tests for retrieving/updating reverse fks<commit_after>from copy import deepcopy
from django.db import models
from django.test import TestCase
from rest_framework import serializers
# ForeignKey
class ForeignKeyTarget(models.Model):
name = models.CharField(max_length=100)
class ForeignKeySource(models.Model):
name = models.CharField(max_length=100)
target = models.ForeignKey(ForeignKeyTarget, related_name='sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
sources = ForeignKeySourceSerializer()
class Meta:
model = ForeignKeyTarget
class ReverseForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
self.target_data = {'id': 1, 'name': u'target-1', 'sources': [
{'id': 1, 'name': u'source-1', 'target': 1},
{'id': 2, 'name': u'source-2', 'target': 1},
{'id': 3, 'name': u'source-3', 'target': 1},
]}
self.new_target_data = {'id': 2, 'name': u'target-2', 'sources': []}
self.data = [self.target_data, self.new_target_data]
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
self.assertEquals(serializer.data, self.data)
def test_reverse_foreign_key_update(self):
data = deepcopy(self.target_data)
data['sources'][0]['name'] = 'source-1-changed'
data['sources'][2]['name'] = 'source-3-changed'
instance = ForeignKeyTarget.objects.get(pk=1)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
self.assertEquals(serializer.data, data)
serializer.save()
# Ensure target 1 is updated, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset)
expected = deepcopy(self.data)
expected[0]['sources'][0]['name'] = 'source-1-changed'
expected[0]['sources'][2]['name'] = 'source-3-changed'
self.assertEquals(serializer.data, expected)
|
|
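A framework-free sketch of the nested shape those tests assert: a reverse foreign key serializes as a list of child dicts under the parent. The dataclass names below are illustrative stand-ins for the Django models, not part of the record.

from dataclasses import dataclass, field
from typing import List

@dataclass
class Source:
    id: int
    name: str
    target_id: int

@dataclass
class Target:
    id: int
    name: str
    sources: List[Source] = field(default_factory=list)

def serialize_target(target: Target) -> dict:
    # Mirrors ForeignKeyTargetSerializer: the reverse FK appears as a nested list.
    return {
        "id": target.id,
        "name": target.name,
        "sources": [
            {"id": s.id, "name": s.name, "target": s.target_id}
            for s in target.sources
        ],
    }

t = Target(id=1, name="target-1")
t.sources = [Source(i, "source-%d" % i, t.id) for i in range(1, 4)]
assert serialize_target(t)["sources"][0] == {"id": 1, "name": "source-1", "target": 1}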
13fd2335eb8b8b93e5330fe9bcc125557bffb198
|
ecommerce/extensions/payment/migrations/0012_auto_20161109_1456.py
|
ecommerce/extensions/payment/migrations/0012_auto_20161109_1456.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_paypalprocessorconfiguration'),
]
operations = [
migrations.AlterField(
model_name='paypalprocessorconfiguration',
name='retry_attempts',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Number of times to retry failing Paypal client actions (e.g., payment creation, payment execution)'),
),
]
|
Add missing migration for verbose_name alter
|
Add missing migration for verbose_name alter
|
Python
|
agpl-3.0
|
edx/ecommerce,edx/ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,edx/ecommerce,edx/ecommerce,eduNEXT/edunext-ecommerce
|
Add missing migration for verbose_name alter
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_paypalprocessorconfiguration'),
]
operations = [
migrations.AlterField(
model_name='paypalprocessorconfiguration',
name='retry_attempts',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Number of times to retry failing Paypal client actions (e.g., payment creation, payment execution)'),
),
]
|
<commit_before><commit_msg>Add missing migration for verbose_name alter<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_paypalprocessorconfiguration'),
]
operations = [
migrations.AlterField(
model_name='paypalprocessorconfiguration',
name='retry_attempts',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Number of times to retry failing Paypal client actions (e.g., payment creation, payment execution)'),
),
]
|
Add missing migration for verbose_name alter# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_paypalprocessorconfiguration'),
]
operations = [
migrations.AlterField(
model_name='paypalprocessorconfiguration',
name='retry_attempts',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Number of times to retry failing Paypal client actions (e.g., payment creation, payment execution)'),
),
]
|
<commit_before><commit_msg>Add missing migration for verbose_name alter<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment', '0011_paypalprocessorconfiguration'),
]
operations = [
migrations.AlterField(
model_name='paypalprocessorconfiguration',
name='retry_attempts',
field=models.PositiveSmallIntegerField(default=0, verbose_name='Number of times to retry failing Paypal client actions (e.g., payment creation, payment execution)'),
),
]
|
|
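Migrations like this one are easy to forget after a verbose_name tweak, so a common guard is to fail CI when model state and migrations disagree. A minimal sketch using Django's standard --check/--dry-run flags; the settings module path is an assumption, not taken from the record.

import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ecommerce.settings")  # assumed path
django.setup()

# Exits non-zero if any app has model changes without a matching migration.
call_command("makemigrations", "--check", "--dry-run")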
0d7706db887bb5d1522f3de39b9fe1533f80fd8d
|
dota2parser.py
|
dota2parser.py
|
from bs4 import BeautifulSoup
import urllib.request
import MySQLdb
db = MySQLdb.connect(user="", passwd="", db="")
c = db.cursor()
c.execute("SELECT id, name FROM heroes WHERE active=1")
heroes = c.fetchall()
for hero_id, hero_name in heroes:
hero_url = 'https://www.dota2.com/hero/'+str(hero_name).replace(' ', '_').replace('\'', '')+'/'
print(hero_url)
response = urllib.request.urlopen(hero_url)
html = response.read()
soup = BeautifulSoup(html)
for overviewAbilityRow in soup.find_all('div', class_='overviewAbilityRow'):
img = overviewAbilityRow.find('img').get('src')
name = overviewAbilityRow.find('h2').string
description = overviewAbilityRow.find('p')
c.execute("INSERT INTO spells (hero_id, name, description, icon) VALUES (%s, %s, %s, %s)", (hero_id, name, description, img))
db.commit()
c.close()
db.close()
|
Add original script version, still not fit for general use
|
Add original script version, still not fit for general use
|
Python
|
mit
|
Vilkku/Dota-2-Hero-Parser
|
Add original script version, still not fit for general use
|
from bs4 import BeautifulSoup
import urllib.request
import MySQLdb
db = MySQLdb.connect(user="", passwd="", db="")
c = db.cursor()
c.execute("SELECT id, name FROM heroes WHERE active=1")
heroes = c.fetchall()
for hero_id, hero_name in heroes:
hero_url = 'https://www.dota2.com/hero/'+str(hero_name).replace(' ', '_').replace('\'', '')+'/'
print(hero_url)
response = urllib.request.urlopen(hero_url)
html = response.read()
soup = BeautifulSoup(html)
for overviewAbilityRow in soup.find_all('div', class_='overviewAbilityRow'):
img = overviewAbilityRow.find('img').get('src')
name = overviewAbilityRow.find('h2').string
description = overviewAbilityRow.find('p')
c.execute("INSERT INTO spells (hero_id, name, description, icon) VALUES (%s, %s, %s, %s)", (hero_id, name, description, img))
db.commit()
c.close()
db.close()
|
<commit_before><commit_msg>Add original script version, still not fit for general use<commit_after>
|
from bs4 import BeautifulSoup
import urllib.request
import MySQLdb
db = MySQLdb.connect(user="", passwd="", db="")
c = db.cursor()
c.execute("SELECT id, name FROM heroes WHERE active=1")
heroes = c.fetchall()
for hero_id, hero_name in heroes:
hero_url = 'https://www.dota2.com/hero/'+str(hero_name).replace(' ', '_').replace('\'', '')+'/'
print(hero_url)
response = urllib.request.urlopen(hero_url)
html = response.read()
soup = BeautifulSoup(html)
for overviewAbilityRow in soup.find_all('div', class_='overviewAbilityRow'):
img = overviewAbilityRow.find('img').get('src')
name = overviewAbilityRow.find('h2').string
description = overviewAbilityRow.find('p')
c.execute("INSERT INTO spells (hero_id, name, description, icon) VALUES (%s, %s, %s, %s)", (hero_id, name, description, img))
db.commit()
c.close()
db.close()
|
Add original script version, still not fit for general usefrom bs4 import BeautifulSoup
import urllib.request
import MySQLdb
db = MySQLdb.connect(user="", passwd="", db="")
c = db.cursor()
c.execute("SELECT id, name FROM heroes WHERE active=1")
heroes = c.fetchall()
for hero_id, hero_name in heroes:
hero_url = 'https://www.dota2.com/hero/'+str(hero_name).replace(' ', '_').replace('\'', '')+'/'
print(hero_url)
response = urllib.request.urlopen(hero_url)
html = response.read()
soup = BeautifulSoup(html)
for overviewAbilityRow in soup.find_all('div', class_='overviewAbilityRow'):
img = overviewAbilityRow.find('img').get('src')
name = overviewAbilityRow.find('h2').string
description = overviewAbilityRow.find('p')
c.execute("INSERT INTO spells (hero_id, name, description, icon) VALUES (%s, %s, %s, %s)", (hero_id, name, description, img))
db.commit()
c.close()
db.close()
|
<commit_before><commit_msg>Add original script version, still not fit for general use<commit_after>from bs4 import BeautifulSoup
import urllib.request
import MySQLdb
db = MySQLdb.connect(user="", passwd="", db="")
c = db.cursor()
c.execute("SELECT id, name FROM heroes WHERE active=1")
heroes = c.fetchall()
for hero_id, hero_name in heroes:
hero_url = 'https://www.dota2.com/hero/'+str(hero_name).replace(' ', '_').replace('\'', '')+'/'
print(hero_url)
response = urllib.request.urlopen(hero_url)
html = response.read()
soup = BeautifulSoup(html)
for overviewAbilityRow in soup.find_all('div', class_='overviewAbilityRow'):
img = overviewAbilityRow.find('img').get('src')
name = overviewAbilityRow.find('h2').string
description = overviewAbilityRow.find('p')
c.execute("INSERT INTO spells (hero_id, name, description, icon) VALUES (%s, %s, %s, %s)", (hero_id, name, description, img))
db.commit()
c.close()
db.close()
|
|
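A self-contained sketch of the parsing step, run against an inline snippet so it works offline; the markup shape mirrors what the script expects, though the live page may differ. Note the original stores the <p> Tag object itself, while get_text() below extracts the string.

from bs4 import BeautifulSoup

html = """
<div class="overviewAbilityRow">
  <img src="/abilities/frost_nova.png">
  <h2>Frost Nova</h2>
  <p>Blasts enemies with frost damage.</p>
</div>
"""

soup = BeautifulSoup(html, "html.parser")  # explicit parser avoids the bs4 warning
for row in soup.find_all("div", class_="overviewAbilityRow"):
    icon = row.find("img")["src"]
    name = row.find("h2").get_text(strip=True)
    description = row.find("p").get_text(strip=True)
    print(name, icon, description)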
0f84eb57024bb856c10a6326a3827cb91e4d20c2
|
pyui_to_clipboard.py
|
pyui_to_clipboard.py
|
import clipboard
filename = 'put_your_filename_here.pyui' # edit this line before running
with open(filename) as in_file:
clipboard.set(in_file.read())
print('The contents of {} are now on the clipboard.'.format(filename))
|
Put the contents of a pyui file onto the clipboard
|
Put the contents of a pyui file onto the clipboard
For pasting up to GitHub, etc.
|
Python
|
apache-2.0
|
cclauss/Ten-lines-or-less
|
Put the contents of a pyui file onto the clipboard
For pasting up to GitHub, etc.
|
import clipboard
filename = 'put_your_filename_here.pyui' # edit this line before running
with open(filename) as in_file:
clipboard.set(in_file.read())
print('The contents of {} are now on the clipboard.'.format(filename))
|
<commit_before><commit_msg>Put the contents of a pyui file onto the clipboard
For pasting up to GitHub, etc.<commit_after>
|
import clipboard
filename = 'put_your_filename_here.pyui' # edit this line before running
with open(filename) as in_file:
clipboard.set(in_file.read())
print('The contents of {} are now on the clipboard.'.format(filename))
|
Put the contents of a pyui file onto the clipboard
For pasting up to GitHub, etc.import clipboard
filename = 'put_your_filename_here.pyui' # edit this line before running
with open(filename) as in_file:
clipboard.set(in_file.read())
print('The contents of {} are now on the clipboard.'.format(filename))
|
<commit_before><commit_msg>Put the contents of a pyui file onto the clipboard
For pasting up to GitHub, etc.<commit_after>import clipboard
filename = 'put_your_filename_here.pyui' # edit this line before running
with open(filename) as in_file:
clipboard.set(in_file.read())
print('The contents of {} are now on the clipboard.'.format(filename))
|
|
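A hedged variant that reads the filename from the command line instead of requiring a source edit; clipboard is Pythonista's built-in module, so this still only runs there.

import sys
import clipboard  # Pythonista-only module, as in the original

filename = sys.argv[1] if len(sys.argv) > 1 else "put_your_filename_here.pyui"
with open(filename) as in_file:
    clipboard.set(in_file.read())
print("The contents of {} are now on the clipboard.".format(filename))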
a3706e1c743ef7ec7f38375b116538a71ccb8455
|
rasterfairy/utils.py
|
rasterfairy/utils.py
|
def cmp_to_key(mycmp):
"""
Convert `sorted` function from python2 to python3.
This function is used to convert `cmp` parameter of python2 sorted
function into `key` parameter of python3 sorted function.
This code is taken from here:
https://docs.python.org/2/howto/sorting.html#the-old-way-using-the-cmp-parameter
:param mycmp: compare function that compares 2 values
:return: key class that compares 2 values
"""
"Convert a cmp= function into a key= function"
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
|
Add utilities to convert from python2 to python3.
|
Add utilities to convert from python2 to python3.
This commit only has one utility that allows to convert sorted()
function with custom elements comparison from parameter cmp (python2)
to parameter key (python3).
|
Python
|
bsd-3-clause
|
Quasimondo/RasterFairy
|
Add utilities to convert from python2 to python3.
This commit only has one utility that allows to convert sorted()
function with custom elements comparison from parameter cmp (python2)
to parameter key (python3).
|
def cmp_to_key(mycmp):
"""
Convert `sorted` function from python2 to python3.
This function is used to convert `cmp` parameter of python2 sorted
function into `key` parameter of python3 sorted function.
This code is taken from here:
https://docs.python.org/2/howto/sorting.html#the-old-way-using-the-cmp-parameter
:param mycmp: compare function that compares 2 values
:return: key class that compares 2 values
"""
"Convert a cmp= function into a key= function"
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
|
<commit_before><commit_msg>Add utilities to convert from python2 to python3.
This commit only has one utility that allows to convert sorted()
function with custom elements comparison from parameter cmp (python2)
to parameter key (python3).<commit_after>
|
def cmp_to_key(mycmp):
"""
Convert `sorted` function from python2 to python3.
This function is used to convert `cmp` parameter of python2 sorted
function into `key` parameter of python3 sorted function.
This code is taken from here:
https://docs.python.org/2/howto/sorting.html#the-old-way-using-the-cmp-parameter
:param mycmp: compare function that compares 2 values
:return: key class that compares 2 values
"""
"Convert a cmp= function into a key= function"
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
|
Add utilities to convert from python2 to python3.
This commit only has one utility that allows to convert sorted()
function with custom elements comparison from parameter cmp (python2)
to parameter key (python3).def cmp_to_key(mycmp):
"""
Convert `sorted` function from python2 to python3.
This function is used to convert `cmp` parameter of python2 sorted
function into `key` parameter of python3 sorted function.
This code is taken from here:
https://docs.python.org/2/howto/sorting.html#the-old-way-using-the-cmp-parameter
:param mycmp: compare function that compares 2 values
:return: key class that compares 2 values
"""
"Convert a cmp= function into a key= function"
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
|
<commit_before><commit_msg>Add utilities to convert from python2 to python3.
This commit only has one utility that allows to convert sorted()
function with custom elements comparison from parameter cmp (python2)
to parameter key (python3).<commit_after>def cmp_to_key(mycmp):
"""
Convert `sorted` function from python2 to python3.
This function is used to convert `cmp` parameter of python2 sorted
function into `key` parameter of python3 sorted function.
This code is taken from here:
https://docs.python.org/2/howto/sorting.html#the-old-way-using-the-cmp-parameter
:param mycmp: compare function that compares 2 values
:return: key class that compares 2 values
"""
"Convert a cmp= function into a key= function"
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
|
|
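Usage sketch for the helper above: an old-style cmp function drives Python 3's sorted through the wrapper key class. functools.cmp_to_key is the stdlib equivalent and is interchangeable with the module's own implementation here.

from functools import cmp_to_key  # the module's own cmp_to_key works identically

def by_length_then_alpha(a, b):
    # Old-style cmp contract: negative, zero, or positive.
    if len(a) != len(b):
        return len(a) - len(b)
    return (a > b) - (a < b)

words = ["pear", "fig", "apple", "kiwi"]
print(sorted(words, key=cmp_to_key(by_length_then_alpha)))
# -> ['fig', 'kiwi', 'pear', 'apple']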
d26034963c0332346ea1b6b50b9ad3d637da7e36
|
spiff/payment/management/commands/attempt_payment.py
|
spiff/payment/management/commands/attempt_payment.py
|
from django.core.management import BaseCommand
from spiff.payment.models import Invoice
import stripe
class Command(BaseCommand):
help = 'Attempts to process an invoice via stripe'
def handle(self, *args, **options):
for invoice in Invoice.objects.unpaid().all():
print invoice
try:
unpaid = invoice.unpaidBalance
invoice.chargeStripe()
print "Paid %s"%(unpaid)
except stripe.error.CardError, e:
print "Could not process card.", e
|
Add script to try and push stripe payment of unpaid invoices
|
Add script to try and push stripe payment of unpaid invoices
|
Python
|
agpl-3.0
|
SYNHAK/spiff,SYNHAK/spiff,SYNHAK/spiff
|
Add script to try and push stripe payment of unpaid invoices
|
from django.core.management import BaseCommand
from spiff.payment.models import Invoice
import stripe
class Command(BaseCommand):
help = 'Attempts to process an invoice via stripe'
def handle(self, *args, **options):
for invoice in Invoice.objects.unpaid().all():
print invoice
try:
unpaid = invoice.unpaidBalance
invoice.chargeStripe()
print "Paid %s"%(unpaid)
except stripe.error.CardError, e:
print "Could not process card.", e
|
<commit_before><commit_msg>Add script to try and push stripe payment of unpaid invoices<commit_after>
|
from django.core.management import BaseCommand
from spiff.payment.models import Invoice
import stripe
class Command(BaseCommand):
help = 'Attempts to process an invoice via stripe'
def handle(self, *args, **options):
for invoice in Invoice.objects.unpaid().all():
print invoice
try:
unpaid = invoice.unpaidBalance
invoice.chargeStripe()
print "Paid %s"%(unpaid)
except stripe.error.CardError, e:
print "Could not process card.", e
|
Add script to try and push stripe payment of unpaid invoicesfrom django.core.management import BaseCommand
from spiff.payment.models import Invoice
import stripe
class Command(BaseCommand):
help = 'Attempts to process an invoice via stripe'
def handle(self, *args, **options):
for invoice in Invoice.objects.unpaid().all():
print invoice
try:
unpaid = invoice.unpaidBalance
invoice.chargeStripe()
print "Paid %s"%(unpaid)
except stripe.error.CardError, e:
print "Could not process card.", e
|
<commit_before><commit_msg>Add script to try and push stripe payment of unpaid invoices<commit_after>from django.core.management import BaseCommand
from spiff.payment.models import Invoice
import stripe
class Command(BaseCommand):
help = 'Attempts to process an invoice via stripe'
def handle(self, *args, **options):
for invoice in Invoice.objects.unpaid().all():
print invoice
try:
unpaid = invoice.unpaidBalance
invoice.chargeStripe()
print "Paid %s"%(unpaid)
except stripe.error.CardError, e:
print "Could not process card.", e
|
|
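The command above is Python 2 (print statements, comma-style except). A hedged Python 3 rendering of the same loop; the Invoice.objects.unpaid(), unpaidBalance, and chargeStripe() names are taken from the record and not verified against current spiff.

import stripe
from django.core.management import BaseCommand

from spiff.payment.models import Invoice


class Command(BaseCommand):
    help = "Attempts to process unpaid invoices via Stripe"

    def handle(self, *args, **options):
        for invoice in Invoice.objects.unpaid().all():
            self.stdout.write(str(invoice))
            try:
                unpaid = invoice.unpaidBalance
                invoice.chargeStripe()
                self.stdout.write("Paid %s" % unpaid)
            except stripe.error.CardError as e:
                self.stderr.write("Could not process card. %s" % e)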
c5ed01ce81b1c0e459d93bf26bf96cdeb80a0344
|
Lib/defconAppKit/representationFactories/__init__.py
|
Lib/defconAppKit/representationFactories/__init__.py
|
from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : NSBezierPathFactory,
"defconAppKit.NoComponentsNSBezierPath" : NoComponentsNSBezierPathFactory,
"defconAppKit.OnlyComponentsNSBezierPath" : OnlyComponentsNSBezierPathFactory,
"defconAppKit.GlyphCell" : GlyphCellFactory,
"defconAppKit.GlyphCellDetail" : GlyphCellDetailFactory,
"defconAppKit.OutlineInformation" : OutlineInformationFactory,
"defconAppKit.MenuImage" : MenuImageRepresentationFactory,
}
_imageFactories = {
"defconAppKit.NSImage" : NSImageFactory
}
def registerAllFactories():
for name, factory in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=None)
for name, factory in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=None)
|
from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : (NSBezierPathFactory, None),
"defconAppKit.NoComponentsNSBezierPath" : (NoComponentsNSBezierPathFactory, None),
"defconAppKit.OnlyComponentsNSBezierPath" : (OnlyComponentsNSBezierPathFactory, None),
"defconAppKit.GlyphCell" : (GlyphCellFactory, None),
"defconAppKit.GlyphCellDetail" : (GlyphCellDetailFactory, None),
"defconAppKit.OutlineInformation" : (OutlineInformationFactory, None),
"defconAppKit.MenuImage" : (MenuImageRepresentationFactory, None),
}
_imageFactories = {
"defconAppKit.NSImage" : (NSImageFactory, ["Image.FileNameChanged", "Image.ColorChanged", "Image.ImageDataChanged"])
}
def registerAllFactories():
for name, (factory, destructiveNotifications) in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=destructiveNotifications)
for name, (factory, destructiveNotifications) in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=destructiveNotifications)
|
Use specific notifications when possible.
|
Use specific notifications when possible.
|
Python
|
mit
|
typesupply/defconAppKit,typemytype/defconAppKit
|
from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : NSBezierPathFactory,
"defconAppKit.NoComponentsNSBezierPath" : NoComponentsNSBezierPathFactory,
"defconAppKit.OnlyComponentsNSBezierPath" : OnlyComponentsNSBezierPathFactory,
"defconAppKit.GlyphCell" : GlyphCellFactory,
"defconAppKit.GlyphCellDetail" : GlyphCellDetailFactory,
"defconAppKit.OutlineInformation" : OutlineInformationFactory,
"defconAppKit.MenuImage" : MenuImageRepresentationFactory,
}
_imageFactories = {
"defconAppKit.NSImage" : NSImageFactory
}
def registerAllFactories():
for name, factory in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=None)
for name, factory in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=None)
Use specific notifications when possible.
|
from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : (NSBezierPathFactory, None),
"defconAppKit.NoComponentsNSBezierPath" : (NoComponentsNSBezierPathFactory, None),
"defconAppKit.OnlyComponentsNSBezierPath" : (OnlyComponentsNSBezierPathFactory, None),
"defconAppKit.GlyphCell" : (GlyphCellFactory, None),
"defconAppKit.GlyphCellDetail" : (GlyphCellDetailFactory, None),
"defconAppKit.OutlineInformation" : (OutlineInformationFactory, None),
"defconAppKit.MenuImage" : (MenuImageRepresentationFactory, None),
}
_imageFactories = {
"defconAppKit.NSImage" : (NSImageFactory, ["Image.FileNameChanged", "Image.ColorChanged", "Image.ImageDataChanged"])
}
def registerAllFactories():
for name, (factory, destructiveNotifications) in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=destructiveNotifications)
for name, (factory, destructiveNotifications) in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=destructiveNotifications)
|
<commit_before>from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : NSBezierPathFactory,
"defconAppKit.NoComponentsNSBezierPath" : NoComponentsNSBezierPathFactory,
"defconAppKit.OnlyComponentsNSBezierPath" : OnlyComponentsNSBezierPathFactory,
"defconAppKit.GlyphCell" : GlyphCellFactory,
"defconAppKit.GlyphCellDetail" : GlyphCellDetailFactory,
"defconAppKit.OutlineInformation" : OutlineInformationFactory,
"defconAppKit.MenuImage" : MenuImageRepresentationFactory,
}
_imageFactories = {
"defconAppKit.NSImage" : NSImageFactory
}
def registerAllFactories():
for name, factory in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=None)
for name, factory in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=None)
<commit_msg>Use specific notifications when possible.<commit_after>
|
from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : (NSBezierPathFactory, None),
"defconAppKit.NoComponentsNSBezierPath" : (NoComponentsNSBezierPathFactory, None),
"defconAppKit.OnlyComponentsNSBezierPath" : (OnlyComponentsNSBezierPathFactory, None),
"defconAppKit.GlyphCell" : (GlyphCellFactory, None),
"defconAppKit.GlyphCellDetail" : (GlyphCellDetailFactory, None),
"defconAppKit.OutlineInformation" : (OutlineInformationFactory, None),
"defconAppKit.MenuImage" : (MenuImageRepresentationFactory, None),
}
_imageFactories = {
"defconAppKit.NSImage" : (NSImageFactory, ["Image.FileNameChanged", "Image.ColorChanged", "Image.ImageDataChanged"])
}
def registerAllFactories():
for name, (factory, destructiveNotifications) in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=destructiveNotifications)
for name, (factory, destructiveNotifications) in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=destructiveNotifications)
|
from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : NSBezierPathFactory,
"defconAppKit.NoComponentsNSBezierPath" : NoComponentsNSBezierPathFactory,
"defconAppKit.OnlyComponentsNSBezierPath" : OnlyComponentsNSBezierPathFactory,
"defconAppKit.GlyphCell" : GlyphCellFactory,
"defconAppKit.GlyphCellDetail" : GlyphCellDetailFactory,
"defconAppKit.OutlineInformation" : OutlineInformationFactory,
"defconAppKit.MenuImage" : MenuImageRepresentationFactory,
}
_imageFactories = {
"defconAppKit.NSImage" : NSImageFactory
}
def registerAllFactories():
for name, factory in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=None)
for name, factory in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=None)
Use specific notifications when possible.from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : (NSBezierPathFactory, None),
"defconAppKit.NoComponentsNSBezierPath" : (NoComponentsNSBezierPathFactory, None),
"defconAppKit.OnlyComponentsNSBezierPath" : (OnlyComponentsNSBezierPathFactory, None),
"defconAppKit.GlyphCell" : (GlyphCellFactory, None),
"defconAppKit.GlyphCellDetail" : (GlyphCellDetailFactory, None),
"defconAppKit.OutlineInformation" : (OutlineInformationFactory, None),
"defconAppKit.MenuImage" : (MenuImageRepresentationFactory, None),
}
_imageFactories = {
"defconAppKit.NSImage" : (NSImageFactory, ["Image.FileNameChanged", "Image.ColorChanged", "Image.ImageDataChanged"])
}
def registerAllFactories():
for name, (factory, destructiveNotifications) in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=destructiveNotifications)
for name, (factory, destructiveNotifications) in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=destructiveNotifications)
|
<commit_before>from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : NSBezierPathFactory,
"defconAppKit.NoComponentsNSBezierPath" : NoComponentsNSBezierPathFactory,
"defconAppKit.OnlyComponentsNSBezierPath" : OnlyComponentsNSBezierPathFactory,
"defconAppKit.GlyphCell" : GlyphCellFactory,
"defconAppKit.GlyphCellDetail" : GlyphCellDetailFactory,
"defconAppKit.OutlineInformation" : OutlineInformationFactory,
"defconAppKit.MenuImage" : MenuImageRepresentationFactory,
}
_imageFactories = {
"defconAppKit.NSImage" : NSImageFactory
}
def registerAllFactories():
for name, factory in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=None)
for name, factory in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=None)
<commit_msg>Use specific notifications when possible.<commit_after>from defcon import Glyph, Image, registerRepresentationFactory
from defconAppKit.representationFactories.nsBezierPathFactory import NSBezierPathFactory
from defconAppKit.representationFactories.glyphCellFactory import GlyphCellFactory
from defconAppKit.representationFactories.glyphCellDetailFactory import GlyphCellDetailFactory
from defconAppKit.representationFactories.glyphViewFactories import NoComponentsNSBezierPathFactory,\
OnlyComponentsNSBezierPathFactory, OutlineInformationFactory, NSImageFactory
from defconAppKit.representationFactories.menuImageFactory import MenuImageRepresentationFactory
_glyphFactories = {
"defconAppKit.NSBezierPath" : (NSBezierPathFactory, None),
"defconAppKit.NoComponentsNSBezierPath" : (NoComponentsNSBezierPathFactory, None),
"defconAppKit.OnlyComponentsNSBezierPath" : (OnlyComponentsNSBezierPathFactory, None),
"defconAppKit.GlyphCell" : (GlyphCellFactory, None),
"defconAppKit.GlyphCellDetail" : (GlyphCellDetailFactory, None),
"defconAppKit.OutlineInformation" : (OutlineInformationFactory, None),
"defconAppKit.MenuImage" : (MenuImageRepresentationFactory, None),
}
_imageFactories = {
"defconAppKit.NSImage" : (NSImageFactory, ["Image.FileNameChanged", "Image.ColorChanged", "Image.ImageDataChanged"])
}
def registerAllFactories():
for name, (factory, destructiveNotifications) in _glyphFactories.items():
registerRepresentationFactory(Glyph, name, factory, destructiveNotifications=destructiveNotifications)
for name, (factory, destructiveNotifications) in _imageFactories.items():
registerRepresentationFactory(Image, name, factory, destructiveNotifications=destructiveNotifications)
|
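A toy, framework-free sketch of what the tuple change buys: a representation registered with specific destructive notifications is invalidated only by those events, while None keeps the old invalidate-on-anything behavior. All names below are illustrative, not defcon's actual API.

class Cache:
    def __init__(self):
        self._cache = {}
        self._factories = {}  # name -> (factory, destructive notifications or None)

    def register(self, name, factory, destructiveNotifications=None):
        self._factories[name] = (factory, destructiveNotifications)

    def getRepresentation(self, name):
        if name not in self._cache:
            factory, _ = self._factories[name]
            self._cache[name] = factory(self)
        return self._cache[name]

    def postNotification(self, notification):
        for name, (_, notifications) in self._factories.items():
            if notifications is None or notification in notifications:
                self._cache.pop(name, None)  # drop the stale representation

cache = Cache()
cache.register("demo.NSImage", lambda obj: object(), ["Image.ColorChanged"])
first = cache.getRepresentation("demo.NSImage")
cache.postNotification("Glyph.Changed")            # unrelated event: cache kept
assert cache.getRepresentation("demo.NSImage") is first
cache.postNotification("Image.ColorChanged")       # registered event: cache dropped
assert cache.getRepresentation("demo.NSImage") is not first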
b68244965b4f69711f0c4d9d42f24e6b3f5742f4
|
update-images.py
|
update-images.py
|
#!/usr/bin/env python
import urllib
def img2base64(img):
return open(img, "rb").read().encode("base64").replace('\n', '')
disabled_base64 = img2base64("assets/no-js.png")
enabled_base64 = img2base64("assets/jsenabled.png")
data = open('bootstrap.js')
output = []
for line in data.readlines():
if line.startswith('const DISABLED_ICON'):
line = 'const DISABLED_ICON = "data:image/png;base64,%s";\n' % disabled_base64
if line.startswith('const ENABLED_ICON'):
line = 'const ENABLED_ICON = "data:image/png;base64,%s";\n' % enabled_base64
output.append(line)
data.close()
data = open('bootstrap.js', 'w')
for line in output:
data.write(line)
data.close()
data = open('index.html', 'w')
data.write("<img src='data:image/png;base64,%s'>" % disabled_base64)
data.write("<img src='data:image/png;base64,%s'>" % enabled_base64)
data.close()
|
Add script to update images in the js code
|
Add script to update images in the js code
|
Python
|
bsd-2-clause
|
richq/toggle-js-addon
|
Add script to update images in the js code
|
#!/usr/bin/env python
import urllib
def img2base64(img):
return open(img, "rb").read().encode("base64").replace('\n', '')
disabled_base64 = img2base64("assets/no-js.png")
enabled_base64 = img2base64("assets/jsenabled.png")
data = open('bootstrap.js')
output = []
for line in data.readlines():
if line.startswith('const DISABLED_ICON'):
line = 'const DISABLED_ICON = "data:image/png;base64,%s";\n' % disabled_base64
if line.startswith('const ENABLED_ICON'):
line = 'const ENABLED_ICON = "data:image/png;base64,%s";\n' % enabled_base64
output.append(line)
data.close()
data = open('bootstrap.js', 'w')
for line in output:
data.write(line)
data.close()
data = open('index.html', 'w')
data.write("<img src='data:image/png;base64,%s'>" % disabled_base64)
data.write("<img src='data:image/png;base64,%s'>" % enabled_base64)
data.close()
|
<commit_before><commit_msg>Add script to update images in the js code<commit_after>
|
#!/usr/bin/env python
import urllib
def img2base64(img):
return open(img, "rb").read().encode("base64").replace('\n', '')
disabled_base64 = img2base64("assets/no-js.png")
enabled_base64 = img2base64("assets/jsenabled.png")
data = open('bootstrap.js')
output = []
for line in data.readlines():
if line.startswith('const DISABLED_ICON'):
line = 'const DISABLED_ICON = "data:image/png;base64,%s";\n' % disabled_base64
if line.startswith('const ENABLED_ICON'):
line = 'const ENABLED_ICON = "data:image/png;base64,%s";\n' % enabled_base64
output.append(line)
data.close()
data = open('bootstrap.js', 'w')
for line in output:
data.write(line)
data.close()
data = open('index.html', 'w')
data.write("<img src='data:image/png;base64,%s'>" % disabled_base64)
data.write("<img src='data:image/png;base64,%s'>" % enabled_base64)
data.close()
|
Add script to update images in the js code#!/usr/bin/env python
import urllib
def img2base64(img):
return open(img, "rb").read().encode("base64").replace('\n', '')
disabled_base64 = img2base64("assets/no-js.png")
enabled_base64 = img2base64("assets/jsenabled.png")
data = open('bootstrap.js')
output = []
for line in data.readlines():
if line.startswith('const DISABLED_ICON'):
line = 'const DISABLED_ICON = "data:image/png;base64,%s";\n' % disabled_base64
if line.startswith('const ENABLED_ICON'):
line = 'const ENABLED_ICON = "data:image/png;base64,%s";\n' % enabled_base64
output.append(line)
data.close()
data = open('bootstrap.js', 'w')
for line in output:
data.write(line)
data.close()
data = open('index.html', 'w')
data.write("<img src='data:image/png;base64,%s'>" % disabled_base64)
data.write("<img src='data:image/png;base64,%s'>" % enabled_base64)
data.close()
|
<commit_before><commit_msg>Add script to update images in the js code<commit_after>#!/usr/bin/env python
import urllib
def img2base64(img):
return open(img, "rb").read().encode("base64").replace('\n', '')
disabled_base64 = img2base64("assets/no-js.png")
enabled_base64 = img2base64("assets/jsenabled.png")
data = open('bootstrap.js')
output = []
for line in data.readlines():
if line.startswith('const DISABLED_ICON'):
line = 'const DISABLED_ICON = "data:image/png;base64,%s";\n' % disabled_base64
if line.startswith('const ENABLED_ICON'):
line = 'const ENABLED_ICON = "data:image/png;base64,%s";\n' % enabled_base64
output.append(line)
data.close()
data = open('bootstrap.js', 'w')
for line in output:
data.write(line)
data.close()
data = open('index.html', 'w')
data.write("<img src='data:image/png;base64,%s'>" % disabled_base64)
data.write("<img src='data:image/png;base64,%s'>" % enabled_base64)
data.close()
|
|
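The helper relies on Python 2's str.encode("base64"). A hedged Python 3 equivalent built on the base64 module; b64encode never inserts line breaks, so the replace('\n', '') step disappears.

import base64

def img2base64(path):
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("ascii")

# data_uri = "data:image/png;base64," + img2base64("assets/no-js.png")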
c168efd883bcc1fc5ed8fe3c80de95db905bb468
|
tests/grammar_creation_test/NonterminalAddingTest.py
|
tests/grammar_creation_test/NonterminalAddingTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NonterminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for nonterminal adding when grammar is created
|
Add file for nonterminal adding when grammar is created
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for nonterminal adding when grammar is created
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NonterminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for nonterminal adding when grammar is created<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NonterminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for nonterminal adding when grammar is created#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NonterminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for nonterminal adding when grammar is created<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import *
class NonterminalAddingTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
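The record adds only an empty skeleton. A hedged sketch of a first assertion it might grow, using grammpy's exported Nonterminal base class; the grammar-creation calls themselves are left out because the record does not show that API.

from unittest import TestCase, main

from grammpy import Nonterminal


class A(Nonterminal):
    pass


class NonterminalAddingTest(TestCase):
    def test_subclass_is_nonterminal(self):
        # Nonterminals in grammpy are declared by subclassing Nonterminal.
        self.assertTrue(issubclass(A, Nonterminal))


if __name__ == '__main__':
    main()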
307e4fda61f92e344bfd90c1a43f5a9076e7b832
|
tests/rules_tests/isValid_tests/InvalidSyntaxTest.py
|
tests/rules_tests/isValid_tests/InvalidSyntaxTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class InvalidSyntaxTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add files for rule's invalid syntax validation
|
Add files for rule's invalid syntax validation
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add files for rule's invalid syntax validation
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class InvalidSyntaxTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add files for rule's invalid syntax validation<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class InvalidSyntaxTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add files for rule's invalid syntax validation#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class InvalidSyntaxTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add files for rule's invalid syntax validation<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class InvalidSyntaxTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
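This skeleton is likewise empty. The generic shape such validity tests take is assertRaises around malformed input; the stdlib-only sketch below shows the pattern without guessing grammpy's exception types.

from unittest import TestCase, main


class InvalidSyntaxTest(TestCase):
    def test_rejects_malformed_input(self):
        # Pattern only: feed something malformed, assert the validator raises.
        with self.assertRaises(ValueError):
            int("not a rule")


if __name__ == '__main__':
    main()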
01710f18efbe29dc5cf187726d5c686beec7e6e7
|
utils/add_plaso_timeline.py
|
utils/add_plaso_timeline.py
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Plaso timeline to timesketch"""
from pyelasticsearch import ElasticSearch
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timesketch.settings")
from django.conf import settings
from django.contrib.auth.models import User
from timesketch.models import Sketch
from timesketch.models import Timeline
from timesketch.models import SketchTimeline
user = User.objects.get(id=2)
es_server = sys.argv[1]
es_port = sys.argv[2]
name = sys.argv[3]
index = sys.argv[4]
es = ElasticSearch("http://%s:%s" % (es_server, es_port))
mapping = {
"plaso_event": {
u'properties': {
u'timesketch_label': {
"type": "nested"}
}
},
}
es.put_mapping(index, "plaso_event", mapping)
timeline = Timeline.objects.create(owner=user, acl_public=True, title=name, description=name, datastore_index=index)
|
Add helper script for getting plaso timeline into timesketch
|
Add helper script for getting plaso timeline into timesketch
|
Python
|
apache-2.0
|
armuk/timesketch,armuk/timesketch,google/timesketch,armuk/timesketch,google/timesketch,armuk/timesketch,google/timesketch,lockhy/timesketch,google/timesketch,lockhy/timesketch,lockhy/timesketch,lockhy/timesketch
|
Add helper script for getting plaso timeline into timesketch
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Plaso timeline to timesketch"""
from pyelasticsearch import ElasticSearch
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timesketch.settings")
from django.conf import settings
from django.contrib.auth.models import User
from timesketch.models import Sketch
from timesketch.models import Timeline
from timesketch.models import SketchTimeline
user = User.objects.get(id=2)
es_server = sys.argv[1]
es_port = sys.argv[2]
name = sys.argv[3]
index = sys.argv[4]
es = ElasticSearch("http://%s:%s" % (es_server, es_port))
mapping = {
"plaso_event": {
u'properties': {
u'timesketch_label': {
"type": "nested"}
}
},
}
es.put_mapping(index, "plaso_event", mapping)
timeline = Timeline.objects.create(owner=user, acl_public=True, title=name, description=name, datastore_index=index)
|
<commit_before><commit_msg>Add helper script for getting plaso timeline into timesketch<commit_after>
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Plaso timeline to timesketch"""
from pyelasticsearch import ElasticSearch
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timesketch.settings")
from django.conf import settings
from django.contrib.auth.models import User
from timesketch.models import Sketch
from timesketch.models import Timeline
from timesketch.models import SketchTimeline
user = User.objects.get(id=2)
es_server = sys.argv[1]
es_port = sys.argv[2]
name = sys.argv[3]
index = sys.argv[4]
es = ElasticSearch("http://%s:%s" % (es_server, es_port))
mapping = {
"plaso_event": {
u'properties': {
u'timesketch_label': {
"type": "nested"}
}
},
}
es.put_mapping(index, "plaso_event", mapping)
timeline = Timeline.objects.create(owner=user, acl_public=True, title=name, description=name, datastore_index=index)
|
Add helper script for getting plaso timeline into timesketch# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Plaso timeline to timesketch"""
from pyelasticsearch import ElasticSearch
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timesketch.settings")
from django.conf import settings
from django.contrib.auth.models import User
from timesketch.models import Sketch
from timesketch.models import Timeline
from timesketch.models import SketchTimeline
user = User.objects.get(id=2)
es_server = sys.argv[1]
es_port = sys.argv[2]
name = sys.argv[3]
index = sys.argv[4]
es = ElasticSearch("http://%s:%s" % (es_server, es_port))
mapping = {
"plaso_event": {
u'properties': {
u'timesketch_label': {
"type": "nested"}
}
},
}
es.put_mapping(index, "plaso_event", mapping)
timeline = Timeline.objects.create(owner=user, acl_public=True, title=name, description=name, datastore_index=index)
|
<commit_before><commit_msg>Add helper script for getting plaso timeline into timesketch<commit_after># Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add Plaso timeline to timesketch"""
from pyelasticsearch import ElasticSearch
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timesketch.settings")
from django.conf import settings
from django.contrib.auth.models import User
from timesketch.models import Sketch
from timesketch.models import Timeline
from timesketch.models import SketchTimeline
user = User.objects.get(id=2)
es_server = sys.argv[1]
es_port = sys.argv[2]
name = sys.argv[3]
index = sys.argv[4]
es = ElasticSearch("http://%s:%s" % (es_server, es_port))
mapping = {
"plaso_event": {
u'properties': {
u'timesketch_label': {
"type": "nested"}
}
},
}
es.put_mapping(index, "plaso_event", mapping)
timeline = Timeline.objects.create(owner=user, acl_public=True, title=name, description=name, datastore_index=index)
|
|
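The script reads four positional sys.argv values with no validation. A hedged sketch of the same inputs via argparse, which yields usage errors for free; flag handling only, the Elasticsearch and Django work is unchanged.

import argparse

parser = argparse.ArgumentParser(description="Add a Plaso timeline to Timesketch")
parser.add_argument("es_server")
parser.add_argument("es_port", type=int)
parser.add_argument("name")
parser.add_argument("index")
args = parser.parse_args()

es_url = "http://%s:%d" % (args.es_server, args.es_port)
print("Connecting to %s, index %s as %r" % (es_url, args.index, args.name))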
2c0fc3387a6dbd54bbcd4c47952ce8739d0b2152
|
dedup_worker.py
|
dedup_worker.py
|
# Pull URL
# Strip query string
# Query exists
# IFN save to sqlite
# IFN push to queue
# IFY do nothing
seen = {}
if __name__ == '__main__':
from helpers import client
ingest = client.queue('ingest')
scrape = client.queue('scrape')
while True:
claimed = ingest.claim(ttl=180, grace=60)
send = []
for msg in claimed:
msg.delete()
if seen.get(msg.body):
print "skipping %s, seen %d pages" % (msg.body, len(seen.keys()))
continue
print "Sending along %s" % msg.body
seen[msg.body] = True
send.append({'body': msg.body, 'ttl': 180})
if len(send): scrape.post(send)
|
Add super-simple deduplication filter that uses a dictionary
|
Add super-simple deduplication filter that uses a dictionary
|
Python
|
mit
|
ryansb/zaqar-webscraper-demo
|
Add super-simple deduplication filter that uses a dictionary
|
# Plan: pull each URL, strip its query string, and check whether it has been seen;
# if not, record it (sqlite eventually; a dict for now) and push it to the scrape queue;
# if it has been seen already, do nothing.
seen = {}
if __name__ == '__main__':
from helpers import client
ingest = client.queue('ingest')
scrape = client.queue('scrape')
while True:
claimed = ingest.claim(ttl=180, grace=60)
send = []
for msg in claimed:
msg.delete()
if seen.get(msg.body):
print "skipping %s, seen %d pages" % (msg.body, len(seen.keys()))
continue
print "Sending along %s" % msg.body
seen[msg.body] = True
send.append({'body': msg.body, 'ttl': 180})
if len(send): scrape.post(send)
|
<commit_before><commit_msg>Add super-simple deduplication filter that uses a dictionary<commit_after>
|
# Plan: pull each URL, strip its query string, and check whether it has been seen;
# if not, record it (sqlite eventually; a dict for now) and push it to the scrape queue;
# if it has been seen already, do nothing.
seen = {}
if __name__ == '__main__':
from helpers import client
ingest = client.queue('ingest')
scrape = client.queue('scrape')
while True:
claimed = ingest.claim(ttl=180, grace=60)
send = []
for msg in claimed:
msg.delete()
if seen.get(msg.body):
print "skipping %s, seen %d pages" % (msg.body, len(seen.keys()))
continue
print "Sending along %s" % msg.body
seen[msg.body] = True
send.append({'body': msg.body, 'ttl': 180})
if len(send): scrape.post(send)
|
Add super-simple deduplication filter that uses a dictionary# Plan: pull each URL, strip its query string, and check whether it has been seen;
# if not, record it (sqlite eventually; a dict for now) and push it to the scrape queue;
# if it has been seen already, do nothing.
seen = {}
if __name__ == '__main__':
from helpers import client
ingest = client.queue('ingest')
scrape = client.queue('scrape')
while True:
claimed = ingest.claim(ttl=180, grace=60)
send = []
for msg in claimed:
msg.delete()
if seen.get(msg.body):
print "skipping %s, seen %d pages" % (msg.body, len(seen.keys()))
continue
print "Sending along %s" % msg.body
seen[msg.body] = True
send.append({'body': msg.body, 'ttl': 180})
if len(send): scrape.post(send)
|
<commit_before><commit_msg>Add super-simple deduplication filter that uses a dictionary<commit_after># Plan: pull each URL, strip its query string, and check whether it has been seen;
# if not, record it (sqlite eventually; a dict for now) and push it to the scrape queue;
# if it has been seen already, do nothing.
seen = {}
if __name__ == '__main__':
from helpers import client
ingest = client.queue('ingest')
scrape = client.queue('scrape')
while True:
claimed = ingest.claim(ttl=180, grace=60)
send = []
for msg in claimed:
msg.delete()
if seen.get(msg.body):
print "skipping %s, seen %d pages" % (msg.body, len(seen.keys()))
continue
print "Sending along %s" % msg.body
seen[msg.body] = True
send.append({'body': msg.body, 'ttl': 180})
if len(send): scrape.post(send)
|
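A minimal, self-contained restatement of the filter logic above, decoupled from the Zaqar client so it can be exercised directly; it uses a set where the worker uses a dict, but the membership semantics are the same.

def dedup(bodies, seen):
    """Return only bodies not seen before, updating seen in place."""
    fresh = []
    for body in bodies:
        if body in seen:
            continue
        seen.add(body)
        fresh.append(body)
    return fresh

if __name__ == '__main__':
    seen = set()
    print(dedup(['a', 'b', 'a'], seen))  # ['a', 'b']
    print(dedup(['a', 'c'], seen))       # ['c']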
|
9d30c51aac7ca00b4f191270a82f24372687163c
|
svg2pdf.py
|
svg2pdf.py
|
#!/usr/bin/env python
"""
Pandoc filter to convert svg files to pdf as suggested at:
https://github.com/jgm/pandoc/issues/265#issuecomment-27317316
"""
__author__ = "Jerome Robert"
import mimetypes
import subprocess
import os
import sys
from pandocfilters import toJSONFilter, Image
fmt_to_option = {
"sile": ("--export-pdf","pdf"),
"docx": ("--export-png", "png"),
}
def svg_to_any(key, value, fmt, meta):
if key == 'Image':
if len(value) == 2:
# before pandoc 1.16
alt, [src, title] = value
attrs = None
else:
attrs, alt, [src, title] = value
mimet,_ = mimetypes.guess_type(src)
option = fmt_to_option.get(fmt)
if mimet == 'image/svg+xml' and option:
base_name,_ = os.path.splitext(src)
eps_name = base_name + "." + option[1]
try:
mtime = os.path.getmtime(eps_name)
except OSError:
mtime = -1
if mtime < os.path.getmtime(src):
cmd_line = ['inkscape', option[0], eps_name, src]
sys.stderr.write("Running %s\n" % " ".join(cmd_line))
subprocess.call(cmd_line, stdout=sys.stderr.fileno())
if attrs:
return Image(attrs, alt, [eps_name, title])
else:
return Image(alt, [eps_name, title])
if __name__ == "__main__":
toJSONFilter(svg_to_any)
|
Add Pandoc filter to convert SVG illustrations to PDF
|
Add Pandoc filter to convert SVG illustrations to PDF
|
Python
|
agpl-3.0
|
alerque/casile,alerque/casile,alerque/casile,alerque/casile,alerque/casile
|
Add Pandoc filter to convert SVG illustrations to PDF
|
#!/usr/bin/env python
"""
Pandoc filter to convert svg files to pdf as suggested at:
https://github.com/jgm/pandoc/issues/265#issuecomment-27317316
"""
__author__ = "Jerome Robert"
import mimetypes
import subprocess
import os
import sys
from pandocfilters import toJSONFilter, Image
fmt_to_option = {
"sile": ("--export-pdf","pdf"),
"docx": ("--export-png", "png"),
}
def svg_to_any(key, value, fmt, meta):
if key == 'Image':
if len(value) == 2:
# before pandoc 1.16
alt, [src, title] = value
attrs = None
else:
attrs, alt, [src, title] = value
mimet,_ = mimetypes.guess_type(src)
option = fmt_to_option.get(fmt)
if mimet == 'image/svg+xml' and option:
base_name,_ = os.path.splitext(src)
eps_name = base_name + "." + option[1]
try:
mtime = os.path.getmtime(eps_name)
except OSError:
mtime = -1
if mtime < os.path.getmtime(src):
cmd_line = ['inkscape', option[0], eps_name, src]
sys.stderr.write("Running %s\n" % " ".join(cmd_line))
subprocess.call(cmd_line, stdout=sys.stderr.fileno())
if attrs:
return Image(attrs, alt, [eps_name, title])
else:
return Image(alt, [eps_name, title])
if __name__ == "__main__":
toJSONFilter(svg_to_any)
|
<commit_before><commit_msg>Add Pandoc filter to convert SVG illustrations to PDF<commit_after>
|
#!/usr/bin/env python
"""
Pandoc filter to convert svg files to pdf as suggested at:
https://github.com/jgm/pandoc/issues/265#issuecomment-27317316
"""
__author__ = "Jerome Robert"
import mimetypes
import subprocess
import os
import sys
from pandocfilters import toJSONFilter, Image
fmt_to_option = {
"sile": ("--export-pdf","pdf"),
"docx": ("--export-png", "png"),
}
def svg_to_any(key, value, fmt, meta):
if key == 'Image':
if len(value) == 2:
# before pandoc 1.16
alt, [src, title] = value
attrs = None
else:
attrs, alt, [src, title] = value
mimet,_ = mimetypes.guess_type(src)
option = fmt_to_option.get(fmt)
if mimet == 'image/svg+xml' and option:
base_name,_ = os.path.splitext(src)
eps_name = base_name + "." + option[1]
try:
mtime = os.path.getmtime(eps_name)
except OSError:
mtime = -1
if mtime < os.path.getmtime(src):
cmd_line = ['inkscape', option[0], eps_name, src]
sys.stderr.write("Running %s\n" % " ".join(cmd_line))
subprocess.call(cmd_line, stdout=sys.stderr.fileno())
if attrs:
return Image(attrs, alt, [eps_name, title])
else:
return Image(alt, [eps_name, title])
if __name__ == "__main__":
toJSONFilter(svg_to_any)
|
Add Pandoc filter to convert SVG illustrations to PDF#!/usr/bin/env python
"""
Pandoc filter to convert svg files to pdf as suggested at:
https://github.com/jgm/pandoc/issues/265#issuecomment-27317316
"""
__author__ = "Jerome Robert"
import mimetypes
import subprocess
import os
import sys
from pandocfilters import toJSONFilter, Image
fmt_to_option = {
"sile": ("--export-pdf","pdf"),
"docx": ("--export-png", "png"),
}
def svg_to_any(key, value, fmt, meta):
if key == 'Image':
if len(value) == 2:
# before pandoc 1.16
alt, [src, title] = value
attrs = None
else:
attrs, alt, [src, title] = value
mimet,_ = mimetypes.guess_type(src)
option = fmt_to_option.get(fmt)
if mimet == 'image/svg+xml' and option:
base_name,_ = os.path.splitext(src)
eps_name = base_name + "." + option[1]
try:
mtime = os.path.getmtime(eps_name)
except OSError:
mtime = -1
if mtime < os.path.getmtime(src):
cmd_line = ['inkscape', option[0], eps_name, src]
sys.stderr.write("Running %s\n" % " ".join(cmd_line))
subprocess.call(cmd_line, stdout=sys.stderr.fileno())
if attrs:
return Image(attrs, alt, [eps_name, title])
else:
return Image(alt, [eps_name, title])
if __name__ == "__main__":
toJSONFilter(svg_to_any)
|
<commit_before><commit_msg>Add Pandoc filter to convert SVG illustrations to PDF<commit_after>#!/usr/bin/env python
"""
Pandoc filter to convert svg files to pdf as suggested at:
https://github.com/jgm/pandoc/issues/265#issuecomment-27317316
"""
__author__ = "Jerome Robert"
import mimetypes
import subprocess
import os
import sys
from pandocfilters import toJSONFilter, Image
fmt_to_option = {
"sile": ("--export-pdf","pdf"),
"docx": ("--export-png", "png"),
}
def svg_to_any(key, value, fmt, meta):
if key == 'Image':
if len(value) == 2:
# before pandoc 1.16
alt, [src, title] = value
attrs = None
else:
attrs, alt, [src, title] = value
mimet,_ = mimetypes.guess_type(src)
option = fmt_to_option.get(fmt)
if mimet == 'image/svg+xml' and option:
base_name,_ = os.path.splitext(src)
eps_name = base_name + "." + option[1]
try:
mtime = os.path.getmtime(eps_name)
except OSError:
mtime = -1
if mtime < os.path.getmtime(src):
cmd_line = ['inkscape', option[0], eps_name, src]
sys.stderr.write("Running %s\n" % " ".join(cmd_line))
subprocess.call(cmd_line, stdout=sys.stderr.fileno())
if attrs:
return Image(attrs, alt, [eps_name, title])
else:
return Image(alt, [eps_name, title])
if __name__ == "__main__":
toJSONFilter(svg_to_any)
|
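A stripped-down filter with the same shape as the one above, for reference: it only rewrites .svg image paths to .pdf without invoking inkscape, and it assumes the pandoc >= 1.16 Image layout instead of handling both variants. The filter filename in the usage comment is illustrative.

# Usage (illustrative): pandoc --filter ./svg_to_pdf_name.py -t docx -o out.docx in.md
from pandocfilters import toJSONFilter, Image

def svg_to_pdf_name(key, value, fmt, meta):
    if key == 'Image':
        attrs, alt, [src, title] = value  # pandoc >= 1.16 layout only
        if src.endswith('.svg'):
            return Image(attrs, alt, [src[:-4] + '.pdf', title])

if __name__ == '__main__':
    toJSONFilter(svg_to_pdf_name)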
|
b7c6b5115ce5aec129af64d6b85c672901a435d3
|
gpmcc/experiments/particle_engine.py
|
gpmcc/experiments/particle_engine.py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gpmcc.experiments.particle_dim import ParticleDim
import multiprocessing
def _particle_learn(args):
X, cctype, distargs, seed = args
np.random.seed(seed)
np.random.shuffle(X)
dim = ParticleDim(X, cctype, distargs)
dim.particle_learn()
return dim
class ParticleEngine(object):
"""Particle Engine."""
def __init__(self, X, dist, distargs=None, multithread=True):
self.multithread = multithread
self.map = map
if self.multithread:
self.pool = multiprocessing.Pool(multiprocessing.cpu_count())
self.map = self.pool.map
self.X = X
self.dist = dist
self.distargs = distargs
self.dims = None
def particle_learn(self, particles=1, seeds=None):
"""Do particle learning in parallel."""
if seeds is None:
seeds = range(particles)
assert len(seeds) == particles
args = ((self.X, self.dist, self.distargs, seed) for (_, seed) in
zip(range(particles), seeds))
self.dims = self.map(_particle_learn, args)
def get_dim(self, index):
return self.dims[index]
|
Add a multiprocessor for particle learning.
|
Add a multiprocessor for particle learning.
|
Python
|
apache-2.0
|
probcomp/cgpm,probcomp/cgpm
|
Add a multiprocessor for particle learning.
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gpmcc.experiments.particle_dim import ParticleDim
import multiprocessing
def _particle_learn(args):
X, cctype, distargs, seed = args
np.random.seed(seed)
np.random.shuffle(X)
dim = ParticleDim(X, cctype, distargs)
dim.particle_learn()
return dim
class ParticleEngine(object):
"""Particle Engine."""
def __init__(self, X, dist, distargs=None, multithread=True):
self.multithread = multithread
self.map = map
if self.multithread:
self.pool = multiprocessing.Pool(multiprocessing.cpu_count())
self.map = self.pool.map
self.X = X
self.dist = dist
self.distargs = distargs
self.dims = None
def particle_learn(self, particles=1, seeds=None):
"""Do particle learning in parallel."""
if seeds is None:
seeds = range(particles)
assert len(seeds) == particles
args = ((self.X, self.dist, self.distargs, seed) for (_, seed) in
zip(range(particles), seeds))
self.dims = self.map(_particle_learn, args)
def get_dim(self, index):
return self.dims[index]
|
<commit_before><commit_msg>Add a multiprocessor for particle learning.<commit_after>
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gpmcc.experiments.particle_dim import ParticleDim
import multiprocessing
def _particle_learn(args):
X, cctype, distargs, seed = args
np.random.seed(seed)
np.random.shuffle(X)
dim = ParticleDim(X, cctype, distargs)
dim.particle_learn()
return dim
class ParticleEngine(object):
"""Particle Engine."""
def __init__(self, X, dist, distargs=None, multithread=True):
self.multithread = multithread
self.map = map
if self.multithread:
self.pool = multiprocessing.Pool(multiprocessing.cpu_count())
self.map = self.pool.map
self.X = X
self.dist = dist
self.distargs = distargs
self.dims = None
def particle_learn(self, particles=1, seeds=None):
"""Do particle learning in parallel."""
if seeds is None:
seeds = range(particles)
assert len(seeds) == particles
args = ((self.X, self.dist, self.distargs, seed) for (_, seed) in
zip(range(particles), seeds))
self.dims = self.map(_particle_learn, args)
def get_dim(self, index):
return self.dims[index]
|
Add a multiprocessor for particle learning.# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gpmcc.experiments.particle_dim import ParticleDim
import multiprocessing
def _particle_learn(args):
X, cctype, distargs, seed = args
np.random.seed(seed)
np.random.shuffle(X)
dim = ParticleDim(X, cctype, distargs)
dim.particle_learn()
return dim
class ParticleEngine(object):
"""Particle Engine."""
def __init__(self, X, dist, distargs=None, multithread=True):
self.multithread = multithread
self.map = map
if self.multithread:
self.pool = multiprocessing.Pool(multiprocessing.cpu_count())
self.map = self.pool.map
self.X = X
self.dist = dist
self.distargs = distargs
self.dims = None
def particle_learn(self, particles=1, seeds=None):
"""Do particle learning in parallel."""
if seeds is None:
seeds = range(particles)
assert len(seeds) == particles
args = ((self.X, self.dist, self.distargs, seed) for (_, seed) in
zip(range(particles), seeds))
self.dims = self.map(_particle_learn, args)
def get_dim(self, index):
return self.dims[index]
|
<commit_before><commit_msg>Add a multiprocessor for particle learning.<commit_after># -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from gpmcc.experiments.particle_dim import ParticleDim
import multiprocessing
def _particle_learn(args):
X, cctype, distargs, seed = args
np.random.seed(seed)
np.random.shuffle(X)
dim = ParticleDim(X, cctype, distargs)
dim.particle_learn()
return dim
class ParticleEngine(object):
"""Particle Engine."""
def __init__(self, X, dist, distargs=None, multithread=True):
self.multithread = multithread
self.map = map
if self.multithread:
self.pool = multiprocessing.Pool(multiprocessing.cpu_count())
self.map = self.pool.map
self.X = X
self.dist = dist
self.distargs = distargs
self.dims = None
def particle_learn(self, particles=1, seeds=None):
"""Do particle learning in parallel."""
if seeds is None:
seeds = range(particles)
assert len(seeds) == particles
args = ((self.X, self.dist, self.distargs, seed) for (_, seed) in
zip(range(particles), seeds))
self.dims = self.map(_particle_learn, args)
def get_dim(self, index):
return self.dims[index]
|
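A usage sketch for the engine above; the data and the 'normal' distribution name are illustrative assumptions, and multithread=False keeps the serial map for easier debugging.

import numpy as np
from gpmcc.experiments.particle_engine import ParticleEngine

X = np.random.normal(size=100)                           # illustrative data
engine = ParticleEngine(X, 'normal', multithread=False)  # 'normal' is assumed to be supported
engine.particle_learn(particles=4, seeds=[5, 6, 7, 8])   # one seed per particle
dim = engine.get_dim(0)                                  # first learned particle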
|
3a9807fd14257c49490ec429d7365c902209508c
|
gumbo_stats.py
|
gumbo_stats.py
|
import ctypes
import sys
def parse_warc(filename):
pass
def parse_file(filename):
with open(filename) as infile:
text = infile.read()
print(text)
if __name__ == '__main__':
filename = sys.argv[1]
if filename.endswith('.warc.gz'):
parse_warc(filename)
else:
parse_file(filename)
|
Add beginnings of a Python driver. Currently just prints out input file.
|
Add beginnings of a Python driver. Currently just prints out input file.
|
Python
|
apache-2.0
|
nostrademons/GumboStats,nostrademons/GumboStats
|
Add beginnings of a Python driver. Currently just prints out input file.
|
import ctypes
import sys
def parse_warc(filename):
pass
def parse_file(filename):
with open(filename) as infile:
text = infile.read()
print(text)
if __name__ == '__main__':
filename = sys.argv[1]
if filename.endswith('.warc.gz'):
parse_warc(filename)
else:
parse_file(filename)
|
<commit_before><commit_msg>Add beginnings of a Python driver. Currently just prints out input file.<commit_after>
|
import ctypes
import sys
def parse_warc(filename):
pass
def parse_file(filename):
with open(filename) as infile:
text = infile.read()
print(text)
if __name__ == '__main__':
filename = sys.argv[1]
if filename.endswith('.warc.gz'):
parse_warc(filename)
else:
parse_file(filename)
|
Add beginnings of a Python driver. Currently just prints out input file.import ctypes
import sys
def parse_warc(filename):
pass
def parse_file(filename):
with open(filename) as infile:
text = infile.read()
print(text)
if __name__ == '__main__':
filename = sys.argv[1]
if filename.endswith('.warc.gz'):
parse_warc(filename)
else:
parse_file(filename)
|
<commit_before><commit_msg>Add beginnings of a Python driver. Currently just prints out input file.<commit_after>import ctypes
import sys
def parse_warc(filename):
pass
def parse_file(filename):
with open(filename) as infile:
text = infile.read()
print(text)
if __name__ == '__main__':
filename = sys.argv[1]
if filename.endswith('.warc.gz'):
parse_warc(filename)
else:
parse_file(filename)
|
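Invocation sketch plus a standalone check of the suffix test that selects the branch; the file names are placeholders.

#   python gumbo_stats.py page.html       # -> parse_file (prints the file)
#   python gumbo_stats.py crawl.warc.gz   # -> parse_warc (still a stub)
# The dispatch is a plain suffix check:
print('crawl.warc.gz'.endswith('.warc.gz'))  # True
print('page.html'.endswith('.warc.gz'))      # False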
|
36d7bc4719490b046d8782465ddeba6e8240233e
|
tools/xml_split_images_locale.py
|
tools/xml_split_images_locale.py
|
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
##
## write bc and bf face images to separate files
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='write faces from different locale to different xmls.',
formatter_class=RawTextHelpFormatter)
parser.add_argument ('files', nargs='+')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='')
# help="increase output verbosity"
args = parser.parse_args()
u.set_verbosity (args.verbosity)
u.set_argv (argv)
u.set_filetype ('faces')
verbose = 0
if verbose > 0:
print("files: ", args.files)
u.split_objects_by_locales (args.files)
if __name__ == "__main__":
main (sys.argv)
|
Split images into bf and bc bears. Defaults to images.
|
Split images into bf and bc bears. Defaults to images.
|
Python
|
mit
|
hypraptive/bearid,hypraptive/bearid,hypraptive/bearid
|
Split images into bf and bc bears. Defaults to images.
|
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
##
## write bc and bf face images to separate files
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='write faces from different locale to different xmls.',
formatter_class=RawTextHelpFormatter)
parser.add_argument ('files', nargs='+')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='')
# help="increase output verbosity"
args = parser.parse_args()
u.set_verbosity (args.verbosity)
u.set_argv (argv)
u.set_filetype ('faces')
verbose = 0
if verbose > 0:
print("files: ", args.files)
u.split_objects_by_locales (args.files)
if __name__ == "__main__":
main (sys.argv)
|
<commit_before><commit_msg>Split images into bf and bc bears. Defaults to images.<commit_after>
|
#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
##
## write bc and bf face images to separate files
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='write faces from different locale to different xmls.',
formatter_class=RawTextHelpFormatter)
parser.add_argument ('files', nargs='+')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='')
# help="increase output verbosity"
args = parser.parse_args()
u.set_verbosity (args.verbosity)
u.set_argv (argv)
u.set_filetype ('faces')
verbose = 0
if verbose > 0:
print("files: ", args.files)
u.split_objects_by_locales (args.files)
if __name__ == "__main__":
main (sys.argv)
|
Split images into bf and bc bears. Defaults to images.#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
##
## write bc and bf face images to separate files
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='write faces from different locale to different xmls.',
formatter_class=RawTextHelpFormatter)
parser.add_argument ('files', nargs='+')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='')
# help="increase output verbosity"
args = parser.parse_args()
u.set_verbosity (args.verbosity)
u.set_argv (argv)
u.set_filetype ('faces')
verbose = 0
if verbose > 0:
print("files: ", args.files)
u.split_objects_by_locales (args.files)
if __name__ == "__main__":
main (sys.argv)
|
<commit_before><commit_msg>Split images into bf and bc bears. Defaults to images.<commit_after>#! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## can be called with:
##
## write bc and bf face images to separate files
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='write faces from different locale to different xmls.',
formatter_class=RawTextHelpFormatter)
parser.add_argument ('files', nargs='+')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='')
# help="increase output verbosity"
args = parser.parse_args()
u.set_verbosity (args.verbosity)
u.set_argv (argv)
u.set_filetype ('faces')
verbose = 0
if verbose > 0:
print("files: ", args.files)
u.split_objects_by_locales (args.files)
if __name__ == "__main__":
main (sys.argv)
|
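A minimal reproduction of the script's CLI surface, runnable without xml_utils; the file names are placeholders.

import argparse

p = argparse.ArgumentParser(description='split faces by locale (sketch)')
p.add_argument('files', nargs='+')
p.add_argument('-v', '--verbosity', type=int, default=1, choices=[0, 1, 2, 3])
print(p.parse_args(['bc_faces.xml', 'bf_faces.xml', '-v', '2']))
# Namespace(files=['bc_faces.xml', 'bf_faces.xml'], verbosity=2)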
|
1dbfcfd6558a3148ea2726898d65e1e8ef9115fc
|
mysite/customs/management/commands/import_bugimporter_data.py
|
mysite/customs/management/commands/import_bugimporter_data.py
|
# This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
import mysite.customs.core_bugimporters
import yaml
class Command(BaseCommand):
args = '<yaml_file yaml_file ...>'
help = "Call this command and pass it YAML files to load into the Bug table"
def handle(self, *args, **options):
for yaml_file in args:
with open(yaml_file) as f:
s = f.read()
bug_dicts = yaml.load(s)
for bug_dict in bug_dicts:
mysite.customs.core_bugimporters.import_one_bug_item(bug_dict)
|
Add a management command that imports bug data from YAML files
|
Add a management command that imports bug data from YAML files
|
Python
|
agpl-3.0
|
vipul-sharma20/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,SnappleCap/oh-mainline,nirmeshk/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,campbe13/openhatch,heeraj123/oh-mainline,eeshangarg/oh-mainline,ehashman/oh-mainline,Changaco/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,nirmeshk/oh-mainline,nirmeshk/oh-mainline,sudheesh001/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,openhatch/oh-mainline,onceuponatimeforever/oh-mainline,sudheesh001/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,vipul-sharma20/oh-mainline,waseem18/oh-mainline,openhatch/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,ojengwa/oh-mainline,openhatch/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,ehashman/oh-mainline,ehashman/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,openhatch/oh-mainline,willingc/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,vipul-sharma20/oh-mainline
|
Add a management command that imports bug data from YAML files
|
# This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
import mysite.customs.core_bugimporters
import yaml
class Command(BaseCommand):
args = '<yaml_file yaml_file ...>'
help = "Call this command and pass it YAML files to load into the Bug table"
def handle(self, *args, **options):
for yaml_file in args:
with open(yaml_file) as f:
s = f.read()
bug_dicts = yaml.load(s)
for bug_dict in bug_dicts:
mysite.customs.core_bugimporters.import_one_bug_item(bug_dict)
|
<commit_before><commit_msg>Add a management command that imports bug data from YAML files<commit_after>
|
# This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
import mysite.customs.core_bugimporters
import yaml
class Command(BaseCommand):
args = '<yaml_file yaml_file ...>'
help = "Call this command and pass it YAML files to load into the Bug table"
def handle(self, *args, **options):
for yaml_file in args:
with open(yaml_file) as f:
s = f.read()
bug_dicts = yaml.load(s)
for bug_dict in bug_dicts:
mysite.customs.core_bugimporters.import_one_bug_item(bug_dict)
|
Add a management command that imports bug data from YAML files# This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
import mysite.customs.core_bugimporters
import yaml
class Command(BaseCommand):
args = '<yaml_file yaml_file ...>'
help = "Call this command and pass it YAML files to load into the Bug table"
def handle(self, *args, **options):
for yaml_file in args:
with open(yaml_file) as f:
s = f.read()
bug_dicts = yaml.load(s)
for bug_dict in bug_dicts:
mysite.customs.core_bugimporters.import_one_bug_item(bug_dict)
|
<commit_before><commit_msg>Add a management command that imports bug data from YAML files<commit_after># This file is part of OpenHatch.
# Copyright (C) 2012 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand
import mysite.customs.core_bugimporters
import yaml
class Command(BaseCommand):
args = '<yaml_file yaml_file ...>'
help = "Call this command and pass it YAML files to load into the Bug table"
def handle(self, *args, **options):
for yaml_file in args:
with open(yaml_file) as f:
s = f.read()
bug_dicts = yaml.load(s)
for bug_dict in bug_dicts:
mysite.customs.core_bugimporters.import_one_bug_item(bug_dict)
|
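Invocation and input-shape sketch; the YAML field names and file names are illustrative, and safe_load is used here where the command itself makes the PyYAML-era bare yaml.load call.

#   python manage.py import_bugimporter_data bugs1.yaml bugs2.yaml
# Each file should parse to a list of bug dicts, e.g.:
import yaml

bug_dicts = yaml.safe_load("- title: Example bug\n- title: Another bug\n")
print(bug_dicts)  # [{'title': 'Example bug'}, {'title': 'Another bug'}]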
|
1314da3ffbaa42aca4a917aef8a230478a22be68
|
scripts/order-symlinks.py
|
scripts/order-symlinks.py
|
#!/usr/bin/env python
# Copyright (C) 2013 Tobias Gruetzmacher
"""
This script takes the JSON file created by 'dosage -o json' and uses the
metadata to build a symlink farm in the deduced order of the comic. It creates
those in a subdirectory called 'inorder'.
"""
from __future__ import print_function
import sys
import os
import codecs
import json
def jsonFn(d):
return os.path.join(d, 'dosage.json')
def loadJson(d):
with codecs.open(jsonFn(d), 'r', 'utf-8') as f:
data = json.load(f)
return data
def prepare_output(d):
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir
def create_symlinks(d):
data = loadJson(d)
outDir = prepare_output(d)
unseen = list(data["pages"].keys())
while len(unseen) > 0:
latest = work = unseen[0]
while work in unseen:
unseen.remove(work)
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
print("Latest page: %s" % (latest))
order = []
work = latest
while work in data["pages"]:
order.extend(data["pages"][work]["images"].values())
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
else:
work = None
order.reverse()
for i, img in enumerate(order):
os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
if __name__ == '__main__':
if len(sys.argv) > 1:
for d in sys.argv[1:]:
if os.path.exists(jsonFn(d)):
create_symlinks(d)
else:
print("No JSON file found in '%s'." % (d))
else:
print("Usage: %s comic-dirs" % (os.path.basename(sys.argv[0])))
|
Add a script that uses the JSON metadata to create ordered symlinks.
|
Add a script that uses the JSON metadata to create ordered symlinks.
|
Python
|
mit
|
webcomics/dosage,mbrandis/dosage,peterjanes/dosage,Freestila/dosage,wummel/dosage,wummel/dosage,blade2005/dosage,Freestila/dosage,mbrandis/dosage,peterjanes/dosage,webcomics/dosage,blade2005/dosage
|
Add a script that uses the JSON metadata to create ordered symlinks.
|
#!/usr/bin/env python
# Copyright (C) 2013 Tobias Gruetzmacher
"""
This script takes the JSON file created by 'dosage -o json' and uses the
metadata to build a symlink farm in the deduced order of the comic. It creates
those in a subdirectory called 'inorder'.
"""
from __future__ import print_function
import sys
import os
import codecs
import json
def jsonFn(d):
return os.path.join(d, 'dosage.json')
def loadJson(d):
with codecs.open(jsonFn(d), 'r', 'utf-8') as f:
data = json.load(f)
return data
def prepare_output(d):
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir
def create_symlinks(d):
data = loadJson(d)
outDir = prepare_output(d)
unseen = list(data["pages"].keys())
while len(unseen) > 0:
latest = work = unseen[0]
while work in unseen:
unseen.remove(work)
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
print("Latest page: %s" % (latest))
order = []
work = latest
while work in data["pages"]:
order.extend(data["pages"][work]["images"].values())
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
else:
work = None
order.reverse()
for i, img in enumerate(order):
os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
if __name__ == '__main__':
if len(sys.argv) > 1:
for d in sys.argv[1:]:
if os.path.exists(jsonFn(d)):
create_symlinks(d)
else:
print("No JSON file found in '%s'." % (d))
else:
print("Usage: %s comic-dirs" % (os.path.basename(sys.argv[0])))
|
<commit_before><commit_msg>Add a script that uses the JSON metadata to create ordered symlinks.<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2013 Tobias Gruetzmacher
"""
This script takes the JSON file created by 'dosage -o json' and uses the
metadata to build a symlink farm in the deduced order of the comic. It creates
those in a subdirectory called 'inorder'.
"""
from __future__ import print_function
import sys
import os
import codecs
import json
def jsonFn(d):
return os.path.join(d, 'dosage.json')
def loadJson(d):
with codecs.open(jsonFn(d), 'r', 'utf-8') as f:
data = json.load(f)
return data
def prepare_output(d):
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir
def create_symlinks(d):
data = loadJson(d)
outDir = prepare_output(d)
unseen = list(data["pages"].keys())
while len(unseen) > 0:
latest = work = unseen[0]
while work in unseen:
unseen.remove(work)
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
print("Latest page: %s" % (latest))
order = []
work = latest
while work in data["pages"]:
order.extend(data["pages"][work]["images"].values())
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
else:
work = None
order.reverse()
for i, img in enumerate(order):
os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
if __name__ == '__main__':
if len(sys.argv) > 1:
for d in sys.argv[1:]:
if os.path.exists(jsonFn(d)):
create_symlinks(d)
else:
print("No JSON file found in '%s'." % (d))
else:
print("Usage: %s comic-dirs" % (os.path.basename(sys.argv[0])))
|
Add a script that uses the JSON metadata to create ordered symlinks.#!/usr/bin/env python
# Copyright (C) 2013 Tobias Gruetzmacher
"""
This script takes the JSON file created by 'dosage -o json' and uses the
metadata to build a symlink farm in the deduced order of the comic. It creates
those in a subdirectory called 'inorder'.
"""
from __future__ import print_function
import sys
import os
import codecs
import json
def jsonFn(d):
return os.path.join(d, 'dosage.json')
def loadJson(d):
with codecs.open(jsonFn(d), 'r', 'utf-8') as f:
data = json.load(f)
return data
def prepare_output(d):
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir
def create_symlinks(d):
data = loadJson(d)
outDir = prepare_output(d)
unseen = list(data["pages"].keys())
while len(unseen) > 0:
latest = work = unseen[0]
while work in unseen:
unseen.remove(work)
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
print("Latest page: %s" % (latest))
order = []
work = latest
while work in data["pages"]:
order.extend(data["pages"][work]["images"].values())
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
else:
work = None
order.reverse()
for i, img in enumerate(order):
os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
if __name__ == '__main__':
if len(sys.argv) > 1:
for d in sys.argv[1:]:
if os.path.exists(jsonFn(d)):
create_symlinks(d)
else:
print("No JSON file found in '%s'." % (d))
else:
print("Usage: %s comic-dirs" % (os.path.basename(sys.argv[0])))
|
<commit_before><commit_msg>Add a script that uses the JSON metadata to create ordered symlinks.<commit_after>#!/usr/bin/env python
# Copyright (C) 2013 Tobias Gruetzmacher
"""
This script takes the JSON file created by 'dosage -o json' and uses the
metadata to build a symlink farm in the deduced order of the comic. It creates
those in a subdirectory called 'inorder'.
"""
from __future__ import print_function
import sys
import os
import codecs
import json
def jsonFn(d):
return os.path.join(d, 'dosage.json')
def loadJson(d):
with codecs.open(jsonFn(d), 'r', 'utf-8') as f:
data = json.load(f)
return data
def prepare_output(d):
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir
def create_symlinks(d):
data = loadJson(d)
outDir = prepare_output(d)
unseen = list(data["pages"].keys())
while len(unseen) > 0:
latest = work = unseen[0]
while work in unseen:
unseen.remove(work)
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
print("Latest page: %s" % (latest))
order = []
work = latest
while work in data["pages"]:
order.extend(data["pages"][work]["images"].values())
if "prev" in data["pages"][work]:
work = data["pages"][work]["prev"]
else:
work = None
order.reverse()
for i, img in enumerate(order):
os.symlink(os.path.join('..', img), os.path.join(outDir, '%05i_%s' % (i, img)))
if __name__ == '__main__':
if len(sys.argv) > 1:
for d in sys.argv[1:]:
if os.path.exists(jsonFn(d)):
create_symlinks(d)
else:
print("No JSON file found in '%s'." % (d))
else:
print("Usage: %s comic-dirs" % (os.path.basename(sys.argv[0])))
|
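The prev-link walk above, restated against an inline dosage.json-shaped dict (field names taken from the code, values illustrative) so the ordering logic can be checked without touching the filesystem.

data = {"pages": {
    "http://c/3": {"prev": "http://c/2", "images": {"img": "c3.png"}},
    "http://c/2": {"prev": "http://c/1", "images": {"img": "c2.png"}},
    "http://c/1": {"images": {"img": "c1.png"}},
}}
order, work = [], "http://c/3"
while work in data["pages"]:
    order.extend(data["pages"][work]["images"].values())
    work = data["pages"][work].get("prev")
order.reverse()
print(order)  # ['c1.png', 'c2.png', 'c3.png']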
|
8180c84a98bec11308afca884a4d7fed4738403b
|
spacy/tests/test_align.py
|
spacy/tests/test_align.py
|
import pytest
from .._align import align
@pytest.mark.parametrize('string1,string2,cost', [
(b'hello', b'hell', 1),
(b'rat', b'cat', 1),
(b'rat', b'rat', 0),
(b'rat', b'catsie', 4),
(b't', b'catsie', 5),
])
def test_align_costs(string1, string2, cost):
output_cost, i2j, j2i, matrix = align(string1, string2)
assert output_cost == cost
@pytest.mark.parametrize('string1,string2,i2j', [
(b'hello', b'hell', [0,1,2,3,-1]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2]),
(b't', b'catsie', [2]),
])
def test_align_i2j(string1, string2, i2j):
output_cost, output_i2j, j2i, matrix = align(string1, string2)
assert list(output_i2j) == i2j
@pytest.mark.parametrize('string1,string2,j2i', [
(b'hello', b'hell', [0,1,2,3]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2, -1, -1, -1]),
(b't', b'catsie', [-1, -1, 0, -1, -1, -1]),
])
def test_align_j2i(string1, string2, j2i):
output_cost, output_i2j, output_j2i, matrix = align(string1, string2)
assert list(output_j2i) == j2i
|
Add tests for new Levenshtein alignment
|
Add tests for new Levenshtein alignment
|
Python
|
mit
|
explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,honnibal/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy
|
Add tests for new Levenshtein alignment
|
import pytest
from .._align import align
@pytest.mark.parametrize('string1,string2,cost', [
(b'hello', b'hell', 1),
(b'rat', b'cat', 1),
(b'rat', b'rat', 0),
(b'rat', b'catsie', 4),
(b't', b'catsie', 5),
])
def test_align_costs(string1, string2, cost):
output_cost, i2j, j2i, matrix = align(string1, string2)
assert output_cost == cost
@pytest.mark.parametrize('string1,string2,i2j', [
(b'hello', b'hell', [0,1,2,3,-1]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2]),
(b't', b'catsie', [2]),
])
def test_align_i2j(string1, string2, i2j):
output_cost, output_i2j, j2i, matrix = align(string1, string2)
assert list(output_i2j) == i2j
@pytest.mark.parametrize('string1,string2,j2i', [
(b'hello', b'hell', [0,1,2,3]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2, -1, -1, -1]),
(b't', b'catsie', [-1, -1, 0, -1, -1, -1]),
])
def test_align_j2i(string1, string2, j2i):
output_cost, output_i2j, output_j2i, matrix = align(string1, string2)
assert list(output_j2i) == j2i
|
<commit_before><commit_msg>Add tests for new Levenshtein alignment<commit_after>
|
import pytest
from .._align import align
@pytest.mark.parametrize('string1,string2,cost', [
(b'hello', b'hell', 1),
(b'rat', b'cat', 1),
(b'rat', b'rat', 0),
(b'rat', b'catsie', 4),
(b't', b'catsie', 5),
])
def test_align_costs(string1, string2, cost):
output_cost, i2j, j2i, matrix = align(string1, string2)
assert output_cost == cost
@pytest.mark.parametrize('string1,string2,i2j', [
(b'hello', b'hell', [0,1,2,3,-1]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2]),
(b't', b'catsie', [2]),
])
def test_align_i2j(string1, string2, i2j):
output_cost, output_i2j, j2i, matrix = align(string1, string2)
assert list(output_i2j) == i2j
@pytest.mark.parametrize('string1,string2,j2i', [
(b'hello', b'hell', [0,1,2,3]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2, -1, -1, -1]),
(b't', b'catsie', [-1, -1, 0, -1, -1, -1]),
])
def test_align_j2i(string1, string2, j2i):
output_cost, output_i2j, output_j2i, matrix = align(string1, string2)
assert list(output_j2i) == j2i
|
Add tests for new Levenshtein alignmentimport pytest
from .._align import align
@pytest.mark.parametrize('string1,string2,cost', [
(b'hello', b'hell', 1),
(b'rat', b'cat', 1),
(b'rat', b'rat', 0),
(b'rat', b'catsie', 4),
(b't', b'catsie', 5),
])
def test_align_costs(string1, string2, cost):
output_cost, i2j, j2i, matrix = align(string1, string2)
assert output_cost == cost
@pytest.mark.parametrize('string1,string2,i2j', [
(b'hello', b'hell', [0,1,2,3,-1]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2]),
(b't', b'catsie', [2]),
])
def test_align_i2j(string1, string2, i2j):
output_cost, output_i2j, j2i, matrix = align(string1, string2)
assert list(output_i2j) == i2j
@pytest.mark.parametrize('string1,string2,j2i', [
(b'hello', b'hell', [0,1,2,3]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2, -1, -1, -1]),
(b't', b'catsie', [-1, -1, 0, -1, -1, -1]),
])
def test_align_j2i(string1, string2, j2i):
output_cost, output_i2j, output_j2i, matrix = align(string1, string2)
assert list(output_j2i) == j2i
|
<commit_before><commit_msg>Add tests for new Levenshtein alignment<commit_after>import pytest
from .._align import align
@pytest.mark.parametrize('string1,string2,cost', [
(b'hello', b'hell', 1),
(b'rat', b'cat', 1),
(b'rat', b'rat', 0),
(b'rat', b'catsie', 4),
(b't', b'catsie', 5),
])
def test_align_costs(string1, string2, cost):
output_cost, i2j, j2i, matrix = align(string1, string2)
assert output_cost == cost
@pytest.mark.parametrize('string1,string2,i2j', [
(b'hello', b'hell', [0,1,2,3,-1]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2]),
(b't', b'catsie', [2]),
])
def test_align_i2j(string1, string2, i2j):
output_cost, output_i2j, j2i, matrix = align(string1, string2)
assert list(output_i2j) == i2j
@pytest.mark.parametrize('string1,string2,j2i', [
(b'hello', b'hell', [0,1,2,3]),
(b'rat', b'cat', [0,1,2]),
(b'rat', b'rat', [0,1,2]),
(b'rat', b'catsie', [0,1,2, -1, -1, -1]),
(b't', b'catsie', [-1, -1, 0, -1, -1, -1]),
])
def test_align_j2i(string1, string2, j2i):
output_cost, output_i2j, output_j2i, matrix = align(string1, string2)
assert list(output_j2i) == j2i
|
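How to exercise the suite, plus the return convention in one call; the absolute import path is an assumption inferred from the tests' relative .._align import.

#   pytest spacy/tests/test_align.py -q
from spacy._align import align  # assumed absolute path for .._align

cost, i2j, j2i, matrix = align(b'rat', b'cat')
assert cost == 1
assert list(i2j) == [0, 1, 2] and list(j2i) == [0, 1, 2]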
|
1a6818d4829c3da42750f6d0f042df203434595c
|
Carkinos/probes/migrations/0002_auto_20160106_2307.py
|
Carkinos/probes/migrations/0002_auto_20160106_2307.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-06 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('probes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProbeID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Probe_id', models.CharField(max_length=20)),
('Gene_symbol', models.CharField(max_length=20)),
('Entrez_id', models.IntegerField()),
('Gene_name', models.TextField(blank=True, default='')),
],
),
migrations.DeleteModel(
name='ProbeID_GeneSymbol',
),
]
|
Add a little to models
|
Add a little to models
Add a little to models
(Should I upload migrations?)
|
Python
|
mit
|
LeeYiFang/Carkinos,LeeYiFang/Carkinos,LeeYiFang/Carkinos,LeeYiFang/Carkinos
|
Add a little to models
Add a little to models
(Should I upload migrations?)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-06 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('probes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProbeID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Probe_id', models.CharField(max_length=20)),
('Gene_symbol', models.CharField(max_length=20)),
('Entrez_id', models.IntegerField()),
('Gene_name', models.TextField(blank=True, default='')),
],
),
migrations.DeleteModel(
name='ProbeID_GeneSymbol',
),
]
|
<commit_before><commit_msg>Add a little to models
Add a little to models
(Should I upload migrations?)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-06 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('probes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProbeID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Probe_id', models.CharField(max_length=20)),
('Gene_symbol', models.CharField(max_length=20)),
('Entrez_id', models.IntegerField()),
('Gene_name', models.TextField(blank=True, default='')),
],
),
migrations.DeleteModel(
name='ProbeID_GeneSymbol',
),
]
|
Add a little to models
Add a little to models
(Should I upload migrations?)# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-06 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('probes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProbeID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Probe_id', models.CharField(max_length=20)),
('Gene_symbol', models.CharField(max_length=20)),
('Entrez_id', models.IntegerField()),
('Gene_name', models.TextField(blank=True, default='')),
],
),
migrations.DeleteModel(
name='ProbeID_GeneSymbol',
),
]
|
<commit_before><commit_msg>Add a little to models
Add a little to models
(Should I upload migrations?)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-06 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('probes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProbeID',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Probe_id', models.CharField(max_length=20)),
('Gene_symbol', models.CharField(max_length=20)),
('Entrez_id', models.IntegerField()),
('Gene_name', models.TextField(blank=True, default='')),
],
),
migrations.DeleteModel(
name='ProbeID_GeneSymbol',
),
]
|
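The model this migration implies, reconstructed directly from its operations; a sketch only, since the project's real models.py may carry Meta options or other details the migration does not show.

from django.db import models

class ProbeID(models.Model):
    Probe_id = models.CharField(max_length=20)
    Gene_symbol = models.CharField(max_length=20)
    Entrez_id = models.IntegerField()
    Gene_name = models.TextField(blank=True, default='')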
|
4fe62ac1211e68f1d9c656453bdf71d6849c3daf
|
migrations/versions/0101_een_logo.py
|
migrations/versions/0101_een_logo.py
|
"""empty message
Revision ID: 0101_een_logo
Revises: 0100_notification_created_by
Create Date: 2017-06-26 11:43:30.374723
"""
from alembic import op
revision = '0101_een_logo'
down_revision = '0100_notification_created_by'
ENTERPRISE_EUROPE_NETWORK_ID = '89ce468b-fb29-4d5d-bd3f-d468fb6f7c36'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'een_x2.png',
'Enterprise Europe Network'
)""".format(ENTERPRISE_EUROPE_NETWORK_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(ENTERPRISE_EUROPE_NETWORK_ID))
|
Add organisation values for the Enterprise Europe Network.
|
Add organisation values for the Enterprise Europe Network.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add organisation values for the Enterprise Europe Network.
|
"""empty message
Revision ID: 0101_een_logo
Revises: 0100_notification_created_by
Create Date: 2017-06-26 11:43:30.374723
"""
from alembic import op
revision = '0101_een_logo'
down_revision = '0100_notification_created_by'
ENTERPRISE_EUROPE_NETWORK_ID = '89ce468b-fb29-4d5d-bd3f-d468fb6f7c36'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'een_x2.png',
'Enterprise Europe Network'
)""".format(ENTERPRISE_EUROPE_NETWORK_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(ENTERPRISE_EUROPE_NETWORK_ID))
|
<commit_before><commit_msg>Add organisation values for the Enterprise Europe Network.<commit_after>
|
"""empty message
Revision ID: 0101_een_logo
Revises: 0100_notification_created_by
Create Date: 2017-06-26 11:43:30.374723
"""
from alembic import op
revision = '0101_een_logo'
down_revision = '0100_notification_created_by'
ENTERPRISE_EUROPE_NETWORK_ID = '89ce468b-fb29-4d5d-bd3f-d468fb6f7c36'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'een_x2.png',
'Enterprise Europe Network'
)""".format(ENTERPRISE_EUROPE_NETWORK_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(ENTERPRISE_EUROPE_NETWORK_ID))
|
Add organisation values for the Enterprise Europe Network."""empty message
Revision ID: 0101_een_logo
Revises: 0100_notification_created_by
Create Date: 2017-06-26 11:43:30.374723
"""
from alembic import op
revision = '0101_een_logo'
down_revision = '0100_notification_created_by'
ENTERPRISE_EUROPE_NETWORK_ID = '89ce468b-fb29-4d5d-bd3f-d468fb6f7c36'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'een_x2.png',
'Enterprise Europe Network'
)""".format(ENTERPRISE_EUROPE_NETWORK_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(ENTERPRISE_EUROPE_NETWORK_ID))
|
<commit_before><commit_msg>Add organisation values for the Enterprise Europe Network.<commit_after>"""empty message
Revision ID: 0101_een_logo
Revises: 0100_notification_created_by
Create Date: 2017-06-26 11:43:30.374723
"""
from alembic import op
revision = '0101_een_logo'
down_revision = '0100_notification_created_by'
ENTERPRISE_EUROPE_NETWORK_ID = '89ce468b-fb29-4d5d-bd3f-d468fb6f7c36'
def upgrade():
op.execute("""INSERT INTO organisation VALUES (
'{}',
'',
'een_x2.png',
'Enterprise Europe Network'
)""".format(ENTERPRISE_EUROPE_NETWORK_ID))
def downgrade():
op.execute("""
DELETE FROM organisation WHERE "id" = '{}'
""".format(ENTERPRISE_EUROPE_NETWORK_ID))
|
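The INSERT above depends on the positional column order of the organisation table; a column-explicit variant survives schema reordering. The column names here are assumptions inferred from the values, not taken from the real table, and the snippet reuses op and ENTERPRISE_EUROPE_NETWORK_ID from the migration.

op.execute("""INSERT INTO organisation (id, colour, logo, name) VALUES (
    '{}',
    '',
    'een_x2.png',
    'Enterprise Europe Network'
)""".format(ENTERPRISE_EUROPE_NETWORK_ID))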
|
845615f2a34c5680ed22a2f4eafa5febe7cd7246
|
alembic/versions/20087beff9ea_added_date_updated_t.py
|
alembic/versions/20087beff9ea_added_date_updated_t.py
|
"""Added date updated to Owner
Revision ID: 20087beff9ea
Revises: 2dc72d16c188
Create Date: 2014-03-09 01:43:00.648013
"""
# revision identifiers, used by Alembic.
revision = '20087beff9ea'
down_revision = '2dc72d16c188'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('owner', sa.Column('date_updated', sa.DateTime()))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('owner', 'date_updated')
### end Alembic commands ###
|
Add date updated to Owner
|
Add date updated to Owner
|
Python
|
apache-2.0
|
CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords,CityOfNewYork/NYCOpenRecords
|
Add date updated to Owner
|
"""Added date updated to Owner
Revision ID: 20087beff9ea
Revises: 2dc72d16c188
Create Date: 2014-03-09 01:43:00.648013
"""
# revision identifiers, used by Alembic.
revision = '20087beff9ea'
down_revision = '2dc72d16c188'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('owner', sa.Column('date_updated', sa.DateTime()))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('owner', 'date_updated')
### end Alembic commands ###
|
<commit_before><commit_msg>Add date updated to Owner<commit_after>
|
"""Added date updated to Owner
Revision ID: 20087beff9ea
Revises: 2dc72d16c188
Create Date: 2014-03-09 01:43:00.648013
"""
# revision identifiers, used by Alembic.
revision = '20087beff9ea'
down_revision = '2dc72d16c188'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('owner', sa.Column('date_updated', sa.DateTime()))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('owner', 'date_updated')
    ### end Alembic commands ###
|
Add date updated to Owner"""Added date updated to Owner
Revision ID: 20087beff9ea
Revises: 2dc72d16c188
Create Date: 2014-03-09 01:43:00.648013
"""
# revision identifiers, used by Alembic.
revision = '20087beff9ea'
down_revision = '2dc72d16c188'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('owner', sa.Column('date_updated', sa.DateTime()))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('owner', 'date_updated')
    ### end Alembic commands ###
|
<commit_before><commit_msg>Add date updated to Owner<commit_after>"""Added date updated to Owner
Revision ID: 20087beff9ea
Revises: 2dc72d16c188
Create Date: 2014-03-09 01:43:00.648013
"""
# revision identifiers, used by Alembic.
revision = '20087beff9ea'
down_revision = '2dc72d16c188'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('owner', sa.Column('date_updated', sa.DateTime()))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('owner', 'date_updated')
    ### end Alembic commands ###
|
|
0781d105e4182bdd8abf1a8c7185311a48273c28
|
salt/beacons/smartos_imgadm.py
|
salt/beacons/smartos_imgadm.py
|
# -*- coding: utf-8 -*-
'''
Beacon that fires events on image import/delete.
.. code-block:: yaml
## minimal
# - check for new images every 1 second (salt default)
# - does not send events at startup
beacons:
imgadm: []
## standard
# - check for new images every 60 seconds
# - send import events at startup for all images
beacons:
imgadm:
- interval: 60
- startup_import_event: True
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'imgadm'
IMGADM_STATE = {
'first_run': True,
'images': [],
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Provides imgadm beacon on SmartOS
'''
if 'imgadm.list' in __salt__:
return True
else:
return (
False,
'{0} beacon can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
def validate(config):
'''
Validate the beacon configuration
'''
vcfg_ret = True
vcfg_msg = 'Valid beacon configuration'
if not isinstance(config, list):
vcfg_ret = False
vcfg_msg = 'Configuration for imgadm beacon must be a list!'
return vcfg_ret, vcfg_msg
def beacon(config):
'''
Poll imgadm and compare available images
'''
ret = []
# NOTE: lookup current images
current_images = __salt__['imgadm.list'](verbose=True)
# NOTE: apply configuration
if IMGADM_STATE['first_run']:
log.info('Applying configuration for imgadm beacon')
_config = {}
list(map(_config.update, config))
if 'startup_import_event' not in _config or not _config['startup_import_event']:
IMGADM_STATE['images'] = current_images
# NOTE: import events
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE['images']:
event['tag'] = "imported/{}".format(uuid)
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
if event:
ret.append(event)
# NOTE: delete events
for uuid in IMGADM_STATE['images']:
event = {}
if uuid not in current_images:
event['tag'] = "deleted/{}".format(uuid)
for label in IMGADM_STATE['images'][uuid]:
event[label] = IMGADM_STATE['images'][uuid][label]
if event:
ret.append(event)
# NOTE: update stored state
IMGADM_STATE['images'] = current_images
# NOTE: disable first_run
if IMGADM_STATE['first_run']:
IMGADM_STATE['first_run'] = False
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
Add imgadm beacons for SmartOS
|
Add imgadm beacons for SmartOS
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add imgadm beacons for SmartOS
|
# -*- coding: utf-8 -*-
'''
Beacon that fires events on image import/delete.
.. code-block:: yaml
## minimal
# - check for new images every 1 second (salt default)
# - does not send events at startup
beacons:
imgadm: []
## standard
# - check for new images every 60 seconds
# - send import events at startup for all images
beacons:
imgadm:
- interval: 60
- startup_import_event: True
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'imgadm'
IMGADM_STATE = {
'first_run': True,
'images': [],
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Provides imgadm beacon on SmartOS
'''
if 'imgadm.list' in __salt__:
return True
else:
return (
False,
'{0} beacon can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
def validate(config):
'''
Validate the beacon configuration
'''
vcfg_ret = True
vcfg_msg = 'Valid beacon configuration'
if not isinstance(config, list):
vcfg_ret = False
vcfg_msg = 'Configuration for imgadm beacon must be a list!'
return vcfg_ret, vcfg_msg
def beacon(config):
'''
Poll imgadm and compare available images
'''
ret = []
# NOTE: lookup current images
current_images = __salt__['imgadm.list'](verbose=True)
# NOTE: apply configuration
if IMGADM_STATE['first_run']:
log.info('Applying configuration for imgadm beacon')
_config = {}
list(map(_config.update, config))
if 'startup_import_event' not in _config or not _config['startup_import_event']:
IMGADM_STATE['images'] = current_images
# NOTE: import events
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE['images']:
event['tag'] = "imported/{}".format(uuid)
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
if event:
ret.append(event)
# NOTE: delete events
for uuid in IMGADM_STATE['images']:
event = {}
if uuid not in current_images:
event['tag'] = "deleted/{}".format(uuid)
for label in IMGADM_STATE['images'][uuid]:
event[label] = IMGADM_STATE['images'][uuid][label]
if event:
ret.append(event)
# NOTE: update stored state
IMGADM_STATE['images'] = current_images
# NOTE: disable first_run
if IMGADM_STATE['first_run']:
IMGADM_STATE['first_run'] = False
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>Add imgadm beacons for SmartOS<commit_after>
|
# -*- coding: utf-8 -*-
'''
Beacon that fires events on image import/delete.
.. code-block:: yaml
## minimal
# - check for new images every 1 second (salt default)
# - does not send events at startup
beacons:
imgadm: []
## standard
# - check for new images every 60 seconds
# - send import events at startup for all images
beacons:
imgadm:
- interval: 60
- startup_import_event: True
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'imgadm'
IMGADM_STATE = {
'first_run': True,
'images': [],
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Provides imgadm beacon on SmartOS
'''
if 'imgadm.list' in __salt__:
return True
else:
return (
False,
'{0} beacon can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
def validate(config):
'''
Validate the beacon configuration
'''
vcfg_ret = True
vcfg_msg = 'Valid beacon configuration'
if not isinstance(config, list):
vcfg_ret = False
vcfg_msg = 'Configuration for imgadm beacon must be a list!'
return vcfg_ret, vcfg_msg
def beacon(config):
'''
Poll imgadm and compare available images
'''
ret = []
# NOTE: lookup current images
current_images = __salt__['imgadm.list'](verbose=True)
# NOTE: apply configuration
if IMGADM_STATE['first_run']:
log.info('Applying configuration for imgadm beacon')
_config = {}
list(map(_config.update, config))
if 'startup_import_event' not in _config or not _config['startup_import_event']:
IMGADM_STATE['images'] = current_images
# NOTE: import events
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE['images']:
event['tag'] = "imported/{}".format(uuid)
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
if event:
ret.append(event)
# NOTE: delete events
for uuid in IMGADM_STATE['images']:
event = {}
if uuid not in current_images:
event['tag'] = "deleted/{}".format(uuid)
for label in IMGADM_STATE['images'][uuid]:
event[label] = IMGADM_STATE['images'][uuid][label]
if event:
ret.append(event)
# NOTE: update stored state
IMGADM_STATE['images'] = current_images
# NOTE: disable first_run
if IMGADM_STATE['first_run']:
IMGADM_STATE['first_run'] = False
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
Add imgadm beacons for SmartOS# -*- coding: utf-8 -*-
'''
Beacon that fires events on image import/delete.
.. code-block:: yaml
## minimal
# - check for new images every 1 second (salt default)
# - does not send events at startup
beacons:
imgadm: []
## standard
# - check for new images every 60 seconds
# - send import events at startup for all images
beacons:
imgadm:
- interval: 60
- startup_import_event: True
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'imgadm'
IMGADM_STATE = {
'first_run': True,
'images': [],
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Provides imgadm beacon on SmartOS
'''
if 'imgadm.list' in __salt__:
return True
else:
return (
False,
'{0} beacon can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
def validate(config):
'''
Validate the beacon configuration
'''
vcfg_ret = True
vcfg_msg = 'Valid beacon configuration'
if not isinstance(config, list):
vcfg_ret = False
vcfg_msg = 'Configuration for imgadm beacon must be a list!'
return vcfg_ret, vcfg_msg
def beacon(config):
'''
Poll imgadm and compare available images
'''
ret = []
# NOTE: lookup current images
current_images = __salt__['imgadm.list'](verbose=True)
# NOTE: apply configuration
if IMGADM_STATE['first_run']:
log.info('Applying configuration for imgadm beacon')
_config = {}
list(map(_config.update, config))
if 'startup_import_event' not in _config or not _config['startup_import_event']:
IMGADM_STATE['images'] = current_images
# NOTE: import events
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE['images']:
event['tag'] = "imported/{}".format(uuid)
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
if event:
ret.append(event)
# NOTE: delete events
for uuid in IMGADM_STATE['images']:
event = {}
if uuid not in current_images:
event['tag'] = "deleted/{}".format(uuid)
for label in IMGADM_STATE['images'][uuid]:
event[label] = IMGADM_STATE['images'][uuid][label]
if event:
ret.append(event)
# NOTE: update stored state
IMGADM_STATE['images'] = current_images
# NOTE: disable first_run
if IMGADM_STATE['first_run']:
IMGADM_STATE['first_run'] = False
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>Add imgadm beacons for SmartOS<commit_after># -*- coding: utf-8 -*-
'''
Beacon that fires events on image import/delete.
.. code-block:: yaml
## minimal
# - check for new images every 1 second (salt default)
# - does not send events at startup
beacons:
imgadm: []
## standard
# - check for new images every 60 seconds
# - send import events at startup for all images
beacons:
imgadm:
- interval: 60
- startup_import_event: True
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
__virtualname__ = 'imgadm'
IMGADM_STATE = {
'first_run': True,
'images': [],
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Provides imgadm beacon on SmartOS
'''
if 'imgadm.list' in __salt__:
return True
else:
return (
False,
'{0} beacon can only be loaded on SmartOS compute nodes'.format(
__virtualname__
)
)
def validate(config):
'''
Validate the beacon configuration
'''
vcfg_ret = True
vcfg_msg = 'Valid beacon configuration'
if not isinstance(config, list):
vcfg_ret = False
vcfg_msg = 'Configuration for imgadm beacon must be a list!'
return vcfg_ret, vcfg_msg
def beacon(config):
'''
Poll imgadm and compare available images
'''
ret = []
# NOTE: lookup current images
current_images = __salt__['imgadm.list'](verbose=True)
# NOTE: apply configuration
if IMGADM_STATE['first_run']:
log.info('Applying configuration for imgadm beacon')
_config = {}
list(map(_config.update, config))
if 'startup_import_event' not in _config or not _config['startup_import_event']:
IMGADM_STATE['images'] = current_images
# NOTE: import events
for uuid in current_images:
event = {}
if uuid not in IMGADM_STATE['images']:
event['tag'] = "imported/{}".format(uuid)
for label in current_images[uuid]:
event[label] = current_images[uuid][label]
if event:
ret.append(event)
# NOTE: delete events
for uuid in IMGADM_STATE['images']:
event = {}
if uuid not in current_images:
event['tag'] = "deleted/{}".format(uuid)
for label in IMGADM_STATE['images'][uuid]:
event[label] = IMGADM_STATE['images'][uuid][label]
if event:
ret.append(event)
# NOTE: update stored state
IMGADM_STATE['images'] = current_images
# NOTE: disable first_run
if IMGADM_STATE['first_run']:
IMGADM_STATE['first_run'] = False
return ret
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
8b63dc73b4e3303d1b86faf42f635f3ce01e9da4
|
run.py
|
run.py
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
import subprocess as sub
### Parse command line arguments
parser = argparse.ArgumentParser(description="M/M/1 queue simulation -- Helper script")
parser.add_argument('reps', metavar='repetitions',
type=int, help='number of repetitions')
parser.add_argument('sim_duration', metavar='simulation_duration',
type=int, help='duration of each simulation stage in seconds')
parser.add_argument('int_rate', metavar='interarrival_rate',
type=int, help='mean packet interarrival rate in seconds')
parser.add_argument('sr_rate', metavar='service_rate',
type=int, help='mean packet service rate in seconds')
parser.add_argument('--batch_size', dest='batch_size', default=4,
type=int, help='batch size for multiprocessing')
parser.add_argument('--initial_seed', dest='init_seed', default=0,
type=int, help='base for seed values')
args = parser.parse_args()
repetitions = args.reps
sim_duration = args.sim_duration
interarrival_rate = args.int_rate
service_rate = args.sr_rate
batch_size = args.batch_size
init_seed = args.init_seed
### Run simulations
try:
# One process at a time
if batch_size == 1:
for n in range(repetitions):
sub.call("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)
# In batches
else:
# Split num of repetitions into batches
quotient = repetitions // batch_size
remainder = repetitions % batch_size
# Run the simulations in parallel as subprocesses
num_proc = batch_size if batch_size <= repetitions else remainder
procs = [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True) for n in range(num_proc)]
while True:
procs_poll = list(map(lambda x: x.poll() != None, procs))
if not all(procs_poll):
procs[procs_poll.index(False)].wait()
elif num_proc < repetitions:
temp_num = batch_size if num_proc + batch_size <= repetitions else remainder
for n in range(num_proc, num_proc + temp_num):
procs += [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)]
num_proc += temp_num
else:
break
except OSError as e:
print("Execution failed: ", e)
|
Create helper script providing multiprocessing support.
|
Create helper script providing multiprocessing support.
|
Python
|
mit
|
kubkon/des-in-python
|
Create helper script providing multiprocessing support.
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
import subprocess as sub
### Parse command line arguments
parser = argparse.ArgumentParser(description="M/M/1 queue simulation -- Helper script")
parser.add_argument('reps', metavar='repetitions',
type=int, help='number of repetitions')
parser.add_argument('sim_duration', metavar='simulation_duration',
type=int, help='duration of each simulation stage in seconds')
parser.add_argument('int_rate', metavar='interarrival_rate',
type=int, help='mean packet interarrival rate in seconds')
parser.add_argument('sr_rate', metavar='service_rate',
type=int, help='mean packet service rate in seconds')
parser.add_argument('--batch_size', dest='batch_size', default=4,
type=int, help='batch size for multiprocessing')
parser.add_argument('--initial_seed', dest='init_seed', default=0,
type=int, help='base for seed values')
args = parser.parse_args()
repetitions = args.reps
sim_duration = args.sim_duration
interarrival_rate = args.int_rate
service_rate = args.sr_rate
batch_size = args.batch_size
init_seed = args.init_seed
### Run simulations
try:
# One process at a time
if batch_size == 1:
for n in range(repetitions):
sub.call("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)
# In batches
else:
# Split num of repetitions into batches
quotient = repetitions // batch_size
remainder = repetitions % batch_size
# Run the simulations in parallel as subprocesses
num_proc = batch_size if batch_size <= repetitions else remainder
procs = [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True) for n in range(num_proc)]
while True:
procs_poll = list(map(lambda x: x.poll() != None, procs))
if not all(procs_poll):
procs[procs_poll.index(False)].wait()
elif num_proc < repetitions:
temp_num = batch_size if num_proc + batch_size <= repetitions else remainder
for n in range(num_proc, num_proc + temp_num):
procs += [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)]
num_proc += temp_num
else:
break
except OSError as e:
print("Execution failed: ", e)
|
<commit_before><commit_msg>Create helper script providing multiprocessing support.<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
import subprocess as sub
### Parse command line arguments
parser = argparse.ArgumentParser(description="M/M/1 queue simulation -- Helper script")
parser.add_argument('reps', metavar='repetitions',
type=int, help='number of repetitions')
parser.add_argument('sim_duration', metavar='simulation_duration',
type=int, help='duration of each simulation stage in seconds')
parser.add_argument('int_rate', metavar='interarrival_rate',
type=int, help='mean packet interarrival rate in seconds')
parser.add_argument('sr_rate', metavar='service_rate',
type=int, help='mean packet service rate in seconds')
parser.add_argument('--batch_size', dest='batch_size', default=4,
type=int, help='batch size for multiprocessing')
parser.add_argument('--initial_seed', dest='init_seed', default=0,
type=int, help='base for seed values')
args = parser.parse_args()
repetitions = args.reps
sim_duration = args.sim_duration
interarrival_rate = args.int_rate
service_rate = args.sr_rate
batch_size = args.batch_size
init_seed = args.init_seed
### Run simulations
try:
# One process at a time
if batch_size == 1:
for n in range(repetitions):
sub.call("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)
# In batches
else:
# Split num of repetitions into batches
quotient = repetitions // batch_size
remainder = repetitions % batch_size
# Run the simulations in parallel as subprocesses
num_proc = batch_size if batch_size <= repetitions else remainder
procs = [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True) for n in range(num_proc)]
while True:
procs_poll = list(map(lambda x: x.poll() != None, procs))
if not all(procs_poll):
procs[procs_poll.index(False)].wait()
elif num_proc < repetitions:
temp_num = batch_size if num_proc + batch_size <= repetitions else remainder
for n in range(num_proc, num_proc + temp_num):
procs += [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)]
num_proc += temp_num
else:
break
except OSError as e:
print("Execution failed: ", e)
|
Create helper script providing multiprocessing support.#!/usr/bin/env python
# encoding: utf-8
import argparse
import subprocess as sub
### Parse command line arguments
parser = argparse.ArgumentParser(description="M/M/1 queue simulation -- Helper script")
parser.add_argument('reps', metavar='repetitions',
type=int, help='number of repetitions')
parser.add_argument('sim_duration', metavar='simulation_duration',
type=int, help='duration of each simulation stage in seconds')
parser.add_argument('int_rate', metavar='interarrival_rate',
type=int, help='mean packet interarrival rate in seconds')
parser.add_argument('sr_rate', metavar='service_rate',
type=int, help='mean packet service rate in seconds')
parser.add_argument('--batch_size', dest='batch_size', default=4,
type=int, help='batch size for multiprocessing')
parser.add_argument('--initial_seed', dest='init_seed', default=0,
type=int, help='base for seed values')
args = parser.parse_args()
repetitions = args.reps
sim_duration = args.sim_duration
interarrival_rate = args.int_rate
service_rate = args.sr_rate
batch_size = args.batch_size
init_seed = args.init_seed
### Run simulations
try:
# One process at a time
if batch_size == 1:
for n in range(repetitions):
sub.call("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)
# In batches
else:
# Split num of repetitions into batches
quotient = repetitions // batch_size
remainder = repetitions % batch_size
# Run the simulations in parallel as subprocesses
num_proc = batch_size if batch_size <= repetitions else remainder
procs = [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True) for n in range(num_proc)]
while True:
procs_poll = list(map(lambda x: x.poll() != None, procs))
if not all(procs_poll):
procs[procs_poll.index(False)].wait()
elif num_proc < repetitions:
temp_num = batch_size if num_proc + batch_size <= repetitions else remainder
for n in range(num_proc, num_proc + temp_num):
procs += [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)]
num_proc += temp_num
else:
break
except OSError as e:
print("Execution failed: ", e)
|
<commit_before><commit_msg>Create helper script providing multiprocessing support.<commit_after>#!/usr/bin/env python
# encoding: utf-8
import argparse
import subprocess as sub
### Parse command line arguments
parser = argparse.ArgumentParser(description="M/M/1 queue simulation -- Helper script")
parser.add_argument('reps', metavar='repetitions',
type=int, help='number of repetitions')
parser.add_argument('sim_duration', metavar='simulation_duration',
type=int, help='duration of each simulation stage in seconds')
parser.add_argument('int_rate', metavar='interarrival_rate',
type=int, help='mean packet interarrival rate in seconds')
parser.add_argument('sr_rate', metavar='service_rate',
type=int, help='mean packet service rate in seconds')
parser.add_argument('--batch_size', dest='batch_size', default=4,
type=int, help='batch size for multiprocessing')
parser.add_argument('--initial_seed', dest='init_seed', default=0,
type=int, help='base for seed values')
args = parser.parse_args()
repetitions = args.reps
sim_duration = args.sim_duration
interarrival_rate = args.int_rate
service_rate = args.sr_rate
batch_size = args.batch_size
init_seed = args.init_seed
### Run simulations
try:
# One process at a time
if batch_size == 1:
for n in range(repetitions):
sub.call("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)
# In batches
else:
# Split num of repetitions into batches
quotient = repetitions // batch_size
remainder = repetitions % batch_size
# Run the simulations in parallel as subprocesses
num_proc = batch_size if batch_size <= repetitions else remainder
procs = [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True) for n in range(num_proc)]
while True:
procs_poll = list(map(lambda x: x.poll() != None, procs))
if not all(procs_poll):
procs[procs_poll.index(False)].wait()
elif num_proc < repetitions:
temp_num = batch_size if num_proc + batch_size <= repetitions else remainder
for n in range(num_proc, num_proc + temp_num):
procs += [sub.Popen("python mm1_main.py {} {} {} --seed={}".format(sim_duration, interarrival_rate, service_rate, n+init_seed), shell=True)]
num_proc += temp_num
else:
break
except OSError as e:
print("Execution failed: ", e)
|
|
e0c82bec30568eb845c71fb0335d6ac5edef18e9
|
corehq/apps/translations/migrations/0002_transifexblacklist.py
|
corehq/apps/translations/migrations/0002_transifexblacklist.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-09 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TransifexBlacklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255)),
('app_id', models.CharField(max_length=32)),
('module_id', models.CharField(max_length=32)),
('field_type', models.CharField(choices=[('detail', 'Case Detail'), ('list', 'Case List')], max_length=100)),
('field_name', models.TextField(help_text="\nThis is the same string that appears in the bulk translations download.\nUsually the string in either case list or detail under 'property'.\nThis could be an xpath or case property name.\nIf it is an ID Mapping then the property should be '<property> (ID Mapping Text)'.\nFor the values each value should be '<id mapping value> (ID Mapping Value)'.\nExample: case detail for tasks_type would have entries:\n tasks_type (ID Mapping Text)\n child (ID Mapping Value)\n pregnancy (ID Mapping Value)\n")),
('display_text', models.TextField(help_text="The default language's translation for this detail/list. If display_text is not filled out then all translations that match the field_type and field_name will be blacklisted")),
],
),
]
|
Add migration that had conflict from merge with master
|
Add migration that had conflict from merge with master
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration that had conflict from merge with master
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-09 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TransifexBlacklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255)),
('app_id', models.CharField(max_length=32)),
('module_id', models.CharField(max_length=32)),
('field_type', models.CharField(choices=[('detail', 'Case Detail'), ('list', 'Case List')], max_length=100)),
('field_name', models.TextField(help_text="\nThis is the same string that appears in the bulk translations download.\nUsually the string in either case list or detail under 'property'.\nThis could be an xpath or case property name.\nIf it is an ID Mapping then the property should be '<property> (ID Mapping Text)'.\nFor the values each value should be '<id mapping value> (ID Mapping Value)'.\nExample: case detail for tasks_type would have entries:\n tasks_type (ID Mapping Text)\n child (ID Mapping Value)\n pregnancy (ID Mapping Value)\n")),
('display_text', models.TextField(help_text="The default language's translation for this detail/list. If display_text is not filled out then all translations that match the field_type and field_name will be blacklisted")),
],
),
]
|
<commit_before><commit_msg>Add migration that had conflict from merge with master<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-09 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TransifexBlacklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255)),
('app_id', models.CharField(max_length=32)),
('module_id', models.CharField(max_length=32)),
('field_type', models.CharField(choices=[('detail', 'Case Detail'), ('list', 'Case List')], max_length=100)),
('field_name', models.TextField(help_text="\nThis is the same string that appears in the bulk translations download.\nUsually the string in either case list or detail under 'property'.\nThis could be an xpath or case property name.\nIf it is an ID Mapping then the property should be '<property> (ID Mapping Text)'.\nFor the values each value should be '<id mapping value> (ID Mapping Value)'.\nExample: case detail for tasks_type would have entries:\n tasks_type (ID Mapping Text)\n child (ID Mapping Value)\n pregnancy (ID Mapping Value)\n")),
('display_text', models.TextField(help_text="The default language's translation for this detail/list. If display_text is not filled out then all translations that match the field_type and field_name will be blacklisted")),
],
),
]
|
Add migration that had conflict from merge with master# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-09 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TransifexBlacklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255)),
('app_id', models.CharField(max_length=32)),
('module_id', models.CharField(max_length=32)),
('field_type', models.CharField(choices=[('detail', 'Case Detail'), ('list', 'Case List')], max_length=100)),
('field_name', models.TextField(help_text="\nThis is the same string that appears in the bulk translations download.\nUsually the string in either case list or detail under 'property'.\nThis could be an xpath or case property name.\nIf it is an ID Mapping then the property should be '<property> (ID Mapping Text)'.\nFor the values each value should be '<id mapping value> (ID Mapping Value)'.\nExample: case detail for tasks_type would have entries:\n tasks_type (ID Mapping Text)\n child (ID Mapping Value)\n pregnancy (ID Mapping Value)\n")),
('display_text', models.TextField(help_text="The default language's translation for this detail/list. If display_text is not filled out then all translations that match the field_type and field_name will be blacklisted")),
],
),
]
|
<commit_before><commit_msg>Add migration that had conflict from merge with master<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-09 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translations', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TransifexBlacklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255)),
('app_id', models.CharField(max_length=32)),
('module_id', models.CharField(max_length=32)),
('field_type', models.CharField(choices=[('detail', 'Case Detail'), ('list', 'Case List')], max_length=100)),
('field_name', models.TextField(help_text="\nThis is the same string that appears in the bulk translations download.\nUsually the string in either case list or detail under 'property'.\nThis could be an xpath or case property name.\nIf it is an ID Mapping then the property should be '<property> (ID Mapping Text)'.\nFor the values each value should be '<id mapping value> (ID Mapping Value)'.\nExample: case detail for tasks_type would have entries:\n tasks_type (ID Mapping Text)\n child (ID Mapping Value)\n pregnancy (ID Mapping Value)\n")),
('display_text', models.TextField(help_text="The default language's translation for this detail/list. If display_text is not filled out then all translations that match the field_type and field_name will be blacklisted")),
],
),
]
|
|
5be91f4e7b3607090e94fbf221628a359063823d
|
data/bag-brk/create_db.py
|
data/bag-brk/create_db.py
|
import csv
import sqlite3
conn = sqlite3.connect('processed-lines.db')
c = conn.cursor()
# c.execute('CREATE TABLE processed (cadastral_designation text, bag_pand_id text, match_type text, parcel_uri text, '
# 'dummy text, mother_parcel_match text, parcel_error text, timestamp timestamp default CURRENT_TIMESTAMP)')
# c.execute('create index cadastral_designation_idx on processed (cadastral_designation)')
processed_lines = []
print('Opening file...')
with open('processed-lines.csv', 'r') as pr:
processed = csv.reader(pr)
for line in processed:
processed_lines.append(line)
print('Inserting rows into database...')
c.executemany('INSERT INTO processed (cadastral_designation, bag_pand_id, match_type, parcel_uri, '
'dummy, mother_parcel_match, parcel_error) VALUES (?, ?, ?, ?, ?, ?, ?)', processed_lines)
print('Committing records...')
conn.commit()
conn.close()
print('Done!')
|
Use indexed text with sqlite
|
Use indexed text with sqlite
|
Python
|
mit
|
PDOK/data.labs.pdok.nl,PDOK/data.labs.pdok.nl,PDOK/data.labs.pdok.nl,PDOK/data.labs.pdok.nl,PDOK/data.labs.pdok.nl
|
Use indexed text with sqlite
|
import csv
import sqlite3
conn = sqlite3.connect('processed-lines.db')
c = conn.cursor()
# c.execute('CREATE TABLE processed (cadastral_designation text, bag_pand_id text, match_type text, parcel_uri text, '
# 'dummy text, mother_parcel_match text, parcel_error text, timestamp timestamp default CURRENT_TIMESTAMP)')
# c.execute('create index cadastral_designation_idx on processed (cadastral_designation)')
processed_lines = []
print('Opening file...')
with open('processed-lines.csv', 'r') as pr:
processed = csv.reader(pr)
for line in processed:
processed_lines.append(line)
print('Inserting rows into database...')
c.executemany('INSERT INTO processed (cadastral_designation, bag_pand_id, match_type, parcel_uri, '
'dummy, mother_parcel_match, parcel_error) VALUES (?, ?, ?, ?, ?, ?, ?)', processed_lines)
print('Committing records...')
conn.commit()
conn.close()
print('Done!')
|
<commit_before><commit_msg>Use indexed text with sqlite<commit_after>
|
import csv
import sqlite3
conn = sqlite3.connect('processed-lines.db')
c = conn.cursor()
# c.execute('CREATE TABLE processed (cadastral_designation text, bag_pand_id text, match_type text, parcel_uri text, '
# 'dummy text, mother_parcel_match text, parcel_error text, timestamp timestamp default CURRENT_TIMESTAMP)')
# c.execute('create index cadastral_designation_idx on processed (cadastral_designation)')
processed_lines = []
print('Opening file...')
with open('processed-lines.csv', 'r') as pr:
processed = csv.reader(pr)
for line in processed:
processed_lines.append(line)
print('Inserting rows into database...')
c.executemany('INSERT INTO processed (cadastral_designation, bag_pand_id, match_type, parcel_uri, '
'dummy, mother_parcel_match, parcel_error) VALUES (?, ?, ?, ?, ?, ?, ?)', processed_lines)
print('Committing records...')
conn.commit()
conn.close()
print('Done!')
|
Use indexed text with sqliteimport csv
import sqlite3
conn = sqlite3.connect('processed-lines.db')
c = conn.cursor()
# c.execute('CREATE TABLE processed (cadastral_designation text, bag_pand_id text, match_type text, parcel_uri text, '
# 'dummy text, mother_parcel_match text, parcel_error text, timestamp timestamp default CURRENT_TIMESTAMP)')
# c.execute('create index cadastral_designation_idx on processed (cadastral_designation)')
processed_lines = []
print('Opening file...')
with open('processed-lines.csv', 'r') as pr:
processed = csv.reader(pr)
for line in processed:
processed_lines.append(line)
print('Inserting rows into database...')
c.executemany('INSERT INTO processed (cadastral_designation, bag_pand_id, match_type, parcel_uri, '
'dummy, mother_parcel_match, parcel_error) VALUES (?, ?, ?, ?, ?, ?, ?)', processed_lines)
print('Committing records...')
conn.commit()
conn.close()
print('Done!')
|
<commit_before><commit_msg>Use indexed text with sqlite<commit_after>import csv
import sqlite3
conn = sqlite3.connect('processed-lines.db')
c = conn.cursor()
# c.execute('CREATE TABLE processed (cadastral_designation text, bag_pand_id text, match_type text, parcel_uri text, '
# 'dummy text, mother_parcel_match text, parcel_error text, timestamp timestamp default CURRENT_TIMESTAMP)')
# c.execute('create index cadastral_designation_idx on processed (cadastral_designation)')
processed_lines = []
print('Opening file...')
with open('processed-lines.csv', 'r') as pr:
processed = csv.reader(pr)
for line in processed:
processed_lines.append(line)
print('Inserting rows into database...')
c.executemany('INSERT INTO processed (cadastral_designation, bag_pand_id, match_type, parcel_uri, '
'dummy, mother_parcel_match, parcel_error) VALUES (?, ?, ?, ?, ?, ?, ?)', processed_lines)
print('Committing records...')
conn.commit()
conn.close()
print('Done!')
|
|
134dbd68cc4630442f1dddb9426207de93c1498b
|
web/courses/migrations/0005_update_solution_visibility_text.py
|
web/courses/migrations/0005_update_solution_visibility_text.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0004_course_institution'),
]
operations = [
migrations.AlterField(
model_name='problemset',
name='solution_visibility',
field=models.CharField(default=b'S', max_length=20, verbose_name='Solution visibility', choices=[(b'H', 'Official solutions are hidden'), (b'S', 'Official solutions are visible when solved'), (b'V', 'Official solutions are visible')]),
),
]
|
Add a missing migration for Course.solution_visibility description
|
Add a missing migration for Course.solution_visibility description
|
Python
|
agpl-3.0
|
matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo
|
Add a missing migration for Course.solution_visibility description
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0004_course_institution'),
]
operations = [
migrations.AlterField(
model_name='problemset',
name='solution_visibility',
field=models.CharField(default=b'S', max_length=20, verbose_name='Solution visibility', choices=[(b'H', 'Official solutions are hidden'), (b'S', 'Official solutions are visible when solved'), (b'V', 'Official solutions are visible')]),
),
]
|
<commit_before><commit_msg>Add a missing migration for Course.solution_visibility description<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0004_course_institution'),
]
operations = [
migrations.AlterField(
model_name='problemset',
name='solution_visibility',
field=models.CharField(default=b'S', max_length=20, verbose_name='Solution visibility', choices=[(b'H', 'Official solutions are hidden'), (b'S', 'Official solutions are visible when solved'), (b'V', 'Official solutions are visible')]),
),
]
|
Add a missing migration for Course.solution_visibility description# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0004_course_institution'),
]
operations = [
migrations.AlterField(
model_name='problemset',
name='solution_visibility',
field=models.CharField(default=b'S', max_length=20, verbose_name='Solution visibility', choices=[(b'H', 'Official solutions are hidden'), (b'S', 'Official solutions are visible when solved'), (b'V', 'Official solutions are visible')]),
),
]
|
<commit_before><commit_msg>Add a missing migration for Course.solution_visibility description<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0004_course_institution'),
]
operations = [
migrations.AlterField(
model_name='problemset',
name='solution_visibility',
field=models.CharField(default=b'S', max_length=20, verbose_name='Solution visibility', choices=[(b'H', 'Official solutions are hidden'), (b'S', 'Official solutions are visible when solved'), (b'V', 'Official solutions are visible')]),
),
]
|
|
64fce7c67849f44492d55ccf8a745b252bf1368b
|
numpy/polynomial/tests/test_printing.py
|
numpy/polynomial/tests/test_printing.py
|
import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0,1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0,1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0,1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0,1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0,1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0,1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0,1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0,1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0,1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0,1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
|
Add some tests for polynomial printing.
|
ENH: Add some tests for polynomial printing.
|
Python
|
bsd-3-clause
|
jakirkham/numpy,bertrand-l/numpy,githubmlai/numpy,SiccarPoint/numpy,jorisvandenbossche/numpy,ChristopherHogan/numpy,GrimDerp/numpy,simongibbons/numpy,pizzathief/numpy,ahaldane/numpy,argriffing/numpy,sigma-random/numpy,kirillzhuravlev/numpy,MSeifert04/numpy,NextThought/pypy-numpy,matthew-brett/numpy,astrofrog/numpy,ogrisel/numpy,dch312/numpy,sonnyhu/numpy,behzadnouri/numpy,ssanderson/numpy,mattip/numpy,dwf/numpy,anntzer/numpy,pelson/numpy,dwf/numpy,Dapid/numpy,madphysicist/numpy,tdsmith/numpy,ChanderG/numpy,stefanv/numpy,nguyentu1602/numpy,skymanaditya1/numpy,tynn/numpy,SunghanKim/numpy,andsor/numpy,Yusa95/numpy,Anwesh43/numpy,jonathanunderwood/numpy,felipebetancur/numpy,tdsmith/numpy,chiffa/numpy,ajdawson/numpy,brandon-rhodes/numpy,mingwpy/numpy,b-carter/numpy,sigma-random/numpy,pdebuyl/numpy,maniteja123/numpy,rhythmsosad/numpy,ContinuumIO/numpy,CMartelLML/numpy,MichaelAquilina/numpy,Linkid/numpy,ESSS/numpy,dch312/numpy,matthew-brett/numpy,BabeNovelty/numpy,MSeifert04/numpy,kirillzhuravlev/numpy,GrimDerp/numpy,drasmuss/numpy,MaPePeR/numpy,WarrenWeckesser/numpy,yiakwy/numpy,ViralLeadership/numpy,jorisvandenbossche/numpy,chiffa/numpy,mathdd/numpy,sinhrks/numpy,CMartelLML/numpy,ahaldane/numpy,numpy/numpy,matthew-brett/numpy,dimasad/numpy,brandon-rhodes/numpy,jankoslavic/numpy,AustereCuriosity/numpy,mhvk/numpy,jakirkham/numpy,endolith/numpy,MichaelAquilina/numpy,jakirkham/numpy,larsmans/numpy,has2k1/numpy,bmorris3/numpy,grlee77/numpy,KaelChen/numpy,gfyoung/numpy,rherault-insa/numpy,rgommers/numpy,nbeaver/numpy,ddasilva/numpy,naritta/numpy,AustereCuriosity/numpy,seberg/numpy,shoyer/numpy,skwbc/numpy,endolith/numpy,ewmoore/numpy,abalkin/numpy,cowlicks/numpy,ViralLeadership/numpy,kiwifb/numpy,groutr/numpy,Yusa95/numpy,nbeaver/numpy,Srisai85/numpy,pyparallel/numpy,njase/numpy,Yusa95/numpy,WarrenWeckesser/numpy,seberg/numpy,rmcgibbo/numpy,musically-ut/numpy,mathdd/numpy,naritta/numpy,bertrand-l/numpy,cjermain/numpy,dato-code/numpy,matthew-brett/numpy,jschueller/numpy,mathdd/numpy,bmorris3/numpy,SunghanKim/numpy,stuarteberg/numpy,embray/numpy,astrofrog/numpy,andsor/numpy,b-carter/numpy,skwbc/numpy,rajathkumarmp/numpy,matthew-brett/numpy,tynn/numpy,tynn/numpy,skymanaditya1/numpy,moreati/numpy,b-carter/numpy,argriffing/numpy,embray/numpy,rherault-insa/numpy,jorisvandenbossche/numpy,felipebetancur/numpy,njase/numpy,ddasilva/numpy,BabeNovelty/numpy,ekalosak/numpy,pizzathief/numpy,kirillzhuravlev/numpy,NextThought/pypy-numpy,joferkington/numpy,WillieMaddox/numpy,rgommers/numpy,grlee77/numpy,MaPePeR/numpy,githubmlai/numpy,tacaswell/numpy,larsmans/numpy,WarrenWeckesser/numpy,dwf/numpy,madphysicist/numpy,BabeNovelty/numpy,rgommers/numpy,rudimeier/numpy,githubmlai/numpy,charris/numpy,drasmuss/numpy,empeeu/numpy,ChanderG/numpy,maniteja123/numpy,utke1/numpy,jankoslavic/numpy,stefanv/numpy,simongibbons/numpy,cowlicks/numpy,skwbc/numpy,gfyoung/numpy,dwillmer/numpy,Linkid/numpy,stuarteberg/numpy,charris/numpy,yiakwy/numpy,gfyoung/numpy,yiakwy/numpy,mattip/numpy,ContinuumIO/numpy,ChristopherHogan/numpy,embray/numpy,BMJHayward/numpy,pizzathief/numpy,rhythmsosad/numpy,empeeu/numpy,leifdenby/numpy,mortada/numpy,MichaelAquilina/numpy,grlee77/numpy,ssanderson/numpy,trankmichael/numpy,ssanderson/numpy,moreati/numpy,larsmans/numpy,chatcannon/numpy,naritta/numpy,sinhrks/numpy,astrofrog/numpy,shoyer/numpy,ESSS/numpy,skymanaditya1/numpy,mortada/numpy,NextThought/pypy-numpy,pbrod/numpy,pyparallel/numpy,ekalosak/numpy,hainm/numpy,has2k1/numpy,has2k1/numpy,Anwesh43/numpy,mindw/numpy,ogrisel/numpy,abalkin/numpy,mwiebe/numpy,kiwi
fb/numpy,trankmichael/numpy,MSeifert04/numpy,sinhrks/numpy,utke1/numpy,SiccarPoint/numpy,Eric89GXL/numpy,dch312/numpy,mingwpy/numpy,SiccarPoint/numpy,Yusa95/numpy,CMartelLML/numpy,rhythmsosad/numpy,chatcannon/numpy,SiccarPoint/numpy,jonathanunderwood/numpy,anntzer/numpy,Eric89GXL/numpy,ESSS/numpy,chiffa/numpy,MaPePeR/numpy,Srisai85/numpy,ekalosak/numpy,ahaldane/numpy,moreati/numpy,numpy/numpy,jschueller/numpy,nguyentu1602/numpy,jakirkham/numpy,gmcastil/numpy,embray/numpy,mwiebe/numpy,pbrod/numpy,shoyer/numpy,solarjoe/numpy,ajdawson/numpy,pbrod/numpy,jorisvandenbossche/numpy,nbeaver/numpy,mhvk/numpy,pyparallel/numpy,SunghanKim/numpy,MaPePeR/numpy,ajdawson/numpy,mingwpy/numpy,ahaldane/numpy,jakirkham/numpy,MSeifert04/numpy,ekalosak/numpy,behzadnouri/numpy,WillieMaddox/numpy,anntzer/numpy,ChristopherHogan/numpy,Anwesh43/numpy,rherault-insa/numpy,mattip/numpy,musically-ut/numpy,ewmoore/numpy,stuarteberg/numpy,solarjoe/numpy,tdsmith/numpy,drasmuss/numpy,madphysicist/numpy,ewmoore/numpy,abalkin/numpy,mingwpy/numpy,BabeNovelty/numpy,mattip/numpy,bmorris3/numpy,jschueller/numpy,musically-ut/numpy,jorisvandenbossche/numpy,hainm/numpy,mhvk/numpy,Srisai85/numpy,joferkington/numpy,ahaldane/numpy,BMJHayward/numpy,rmcgibbo/numpy,astrofrog/numpy,pbrod/numpy,sonnyhu/numpy,charris/numpy,gmcastil/numpy,grlee77/numpy,bringingheavendown/numpy,dwillmer/numpy,BMJHayward/numpy,behzadnouri/numpy,argriffing/numpy,ContinuumIO/numpy,rmcgibbo/numpy,ChanderG/numpy,leifdenby/numpy,numpy/numpy,cjermain/numpy,GrimDerp/numpy,groutr/numpy,gmcastil/numpy,cowlicks/numpy,ewmoore/numpy,sonnyhu/numpy,leifdenby/numpy,madphysicist/numpy,dato-code/numpy,brandon-rhodes/numpy,KaelChen/numpy,charris/numpy,dwf/numpy,nguyentu1602/numpy,MSeifert04/numpy,felipebetancur/numpy,has2k1/numpy,astrofrog/numpy,rudimeier/numpy,WillieMaddox/numpy,sonnyhu/numpy,GaZ3ll3/numpy,GaZ3ll3/numpy,dwf/numpy,mortada/numpy,shoyer/numpy,grlee77/numpy,dimasad/numpy,immerrr/numpy,sinhrks/numpy,BMJHayward/numpy,maniteja123/numpy,seberg/numpy,numpy/numpy,jschueller/numpy,pelson/numpy,pizzathief/numpy,mindw/numpy,felipebetancur/numpy,trankmichael/numpy,GaZ3ll3/numpy,rhythmsosad/numpy,Srisai85/numpy,ogrisel/numpy,chatcannon/numpy,immerrr/numpy,pelson/numpy,pelson/numpy,tdsmith/numpy,SunghanKim/numpy,ViralLeadership/numpy,Linkid/numpy,mortada/numpy,seberg/numpy,tacaswell/numpy,groutr/numpy,pdebuyl/numpy,sigma-random/numpy,shoyer/numpy,andsor/numpy,dwillmer/numpy,AustereCuriosity/numpy,GrimDerp/numpy,bmorris3/numpy,ChanderG/numpy,GaZ3ll3/numpy,ajdawson/numpy,rgommers/numpy,stefanv/numpy,brandon-rhodes/numpy,simongibbons/numpy,dato-code/numpy,hainm/numpy,Dapid/numpy,mwiebe/numpy,madphysicist/numpy,stefanv/numpy,jonathanunderwood/numpy,njase/numpy,cjermain/numpy,naritta/numpy,githubmlai/numpy,andsor/numpy,kirillzhuravlev/numpy,ChristopherHogan/numpy,stuarteberg/numpy,KaelChen/numpy,bringingheavendown/numpy,skymanaditya1/numpy,empeeu/numpy,dwillmer/numpy,joferkington/numpy,Anwesh43/numpy,rajathkumarmp/numpy,pizzathief/numpy,empeeu/numpy,ddasilva/numpy,bertrand-l/numpy,utke1/numpy,KaelChen/numpy,sigma-random/numpy,rajathkumarmp/numpy,mhvk/numpy,tacaswell/numpy,Eric89GXL/numpy,jankoslavic/numpy,endolith/numpy,ogrisel/numpy,Dapid/numpy,cowlicks/numpy,yiakwy/numpy,ewmoore/numpy,hainm/numpy,endolith/numpy,ogrisel/numpy,simongibbons/numpy,mathdd/numpy,mindw/numpy,jankoslavic/numpy,bringingheavendown/numpy,rajathkumarmp/numpy,Linkid/numpy,trankmichael/numpy,pbrod/numpy,pdebuyl/numpy,immerrr/numpy,Eric89GXL/numpy,MichaelAquilina/numpy,rudimeier/numpy,nguyentu1602/numpy,mhvk/nump
y,immerrr/numpy,CMartelLML/numpy,solarjoe/numpy,musically-ut/numpy,anntzer/numpy,rudimeier/numpy,dimasad/numpy,larsmans/numpy,rmcgibbo/numpy,dimasad/numpy,WarrenWeckesser/numpy,pdebuyl/numpy,simongibbons/numpy,cjermain/numpy,mindw/numpy,embray/numpy,stefanv/numpy,kiwifb/numpy,dch312/numpy,pelson/numpy,NextThought/pypy-numpy,joferkington/numpy,WarrenWeckesser/numpy,dato-code/numpy
|
ENH: Add some tests for polynomial printing.
|
import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0,1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0,1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0,1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0,1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0,1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0,1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0,1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0,1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0,1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0,1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>ENH: Add some tests for polynomial printing.<commit_after>
|
import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0,1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0,1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0,1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0,1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0,1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0,1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0,1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0,1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0,1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0,1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
|
ENH: Add some tests for polynomial printing.import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0,1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0,1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0,1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0,1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0,1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0,1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0,1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0,1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0,1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0,1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>ENH: Add some tests for polynomial printing.<commit_after>import numpy.polynomial as poly
from numpy.testing import TestCase, run_module_suite, assert_
class test_str(TestCase):
def test_polynomial_str(self):
res = str(poly.Polynomial([0,1]))
tgt = 'poly([0., 1.])'
assert_(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0,1]))
tgt = 'leg([0., 1.])'
assert_(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0,1]))
tgt = 'herm([0., 1.])'
assert_(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0,1]))
tgt = 'herme([0., 1.])'
assert_(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0,1]))
tgt = 'lag([0., 1.])'
assert_(res, tgt)
class test_repr(TestCase):
    def test_polynomial_repr(self):
res = repr(poly.Polynomial([0,1]))
tgt = 'Polynomial([0., 1.])'
assert_(res, tgt)
    def test_chebyshev_repr(self):
res = repr(poly.Chebyshev([0,1]))
tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0,1]))
tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0,1]))
tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0,1]))
tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
assert_(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0,1]))
tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
assert_(res, tgt)
#
if __name__ == "__main__":
run_module_suite()
|
|
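Note on the assertions above: assert_(res, tgt) only checks that res is truthy (the second argument is the failure message), so these tests pass regardless of what the rendered string actually is. A stricter sketch, not part of the commit, would compare the strings; the exact literal depends on the numpy version's array formatting:

import numpy.polynomial as poly
from numpy.testing import assert_equal

def test_polynomial_str_strict():
    # compare the rendered string instead of merely checking truthiness
    assert_equal(str(poly.Polynomial([0, 1])), 'poly([0. 1.])')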
d251a2b2cd449ed5078b41b09f50003786f3bbde
|
seq_pad.py
|
seq_pad.py
|
#!/usr/bin/python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__= 'Allison MacLeay'
import sys
import os
import argparse
import glob
import gzip
#-----------------------------------------
# MAIN
# pad and trim every fastq file in a directory
# to a single read length
#-----------------------------------------
if __name__ == '__main__':
    parser=argparse.ArgumentParser(description="Pad and trim fastq reads in a directory to a single length.")
parser.add_argument('--dir', default='.', help='directory containing fastq output')
parser.add_argument('--out', default='seq_pad_out', help='directory for output')
parser.add_argument('--len', default=147, help='length to trim and pad to')
args=parser.parse_args()
l=int(args.len)
os.system("mkdir -p " + args.out)
files = glob.glob(os.path.join(args.dir,"*.fastq.gz"))
for f in files:
pfx = f.split('.')[0].split('/')[-1]
fh = gzip.open(f,'r')
out = gzip.open(os.path.join(args.out, pfx + "_padded.fastq.gz"),'wb')
ct=0
        for line in fh:
            line = line.strip()
            ct += 1
            if ct % 4 == 2:
                # sequence line: pad short reads with 'N' before trimming
                if len(line) < l:
                    line = line + ('N' * (l - len(line)))
                line = line[:l]
            elif ct % 4 == 0:
                # quality line: pad with '#' before trimming
                if len(line) < l:
                    line = line + ('#' * (l - len(line)))
                line = line[:l]
            # header and '+' lines pass through; every line is written
            # back with its newline so the output stays a valid fastq
            out.write(line + '\n')
fh.close()
out.close()
|
Create script to pad and trim fastq reads to one length
|
Create script to pad and trim fastq reads to one length
|
Python
|
mit
|
alliemacleay/misc
|
Create script to pad and trim fastq reads to one length
|
#!/usr/bin/python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__= 'Allison MacLeay'
import sys
import os
import argparse
import glob
import gzip
#-----------------------------------------
# MAIN
# pad and trim every fastq file in a directory
# to a single read length
#-----------------------------------------
if __name__ == '__main__':
    parser=argparse.ArgumentParser(description="Pad and trim fastq reads in a directory to a single length.")
parser.add_argument('--dir', default='.', help='directory containing fastq output')
parser.add_argument('--out', default='seq_pad_out', help='directory for output')
parser.add_argument('--len', default=147, help='length to trim and pad to')
args=parser.parse_args()
l=int(args.len)
os.system("mkdir -p " + args.out)
files = glob.glob(os.path.join(args.dir,"*.fastq.gz"))
for f in files:
pfx = f.split('.')[0].split('/')[-1]
fh = gzip.open(f,'r')
out = gzip.open(os.path.join(args.out, pfx + "_padded.fastq.gz"),'wb')
ct=0
        for line in fh:
            line = line.strip()
            ct += 1
            if ct % 4 == 2:
                # sequence line: pad short reads with 'N' before trimming
                if len(line) < l:
                    line = line + ('N' * (l - len(line)))
                line = line[:l]
            elif ct % 4 == 0:
                # quality line: pad with '#' before trimming
                if len(line) < l:
                    line = line + ('#' * (l - len(line)))
                line = line[:l]
            # header and '+' lines pass through; every line is written
            # back with its newline so the output stays a valid fastq
            out.write(line + '\n')
fh.close()
out.close()
|
<commit_before><commit_msg>Create script to pad and trim fastq reads to one length<commit_after>
|
#!/usr/bin/python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__= 'Allison MacLeay'
import sys
import os
import argparse
import glob
import gzip
#-----------------------------------------
# MAIN
# pad and trim every fastq file in a directory
# to a single read length
#-----------------------------------------
if __name__ == '__main__':
    parser=argparse.ArgumentParser(description="Pad and trim fastq reads in a directory to a single length.")
parser.add_argument('--dir', default='.', help='directory containing fastq output')
parser.add_argument('--out', default='seq_pad_out', help='directory for output')
parser.add_argument('--len', default=147, help='length to trim and pad to')
args=parser.parse_args()
l=int(args.len)
os.system("mkdir -p " + args.out)
files = glob.glob(os.path.join(args.dir,"*.fastq.gz"))
for f in files:
pfx = f.split('.')[0].split('/')[-1]
fh = gzip.open(f,'r')
out = gzip.open(os.path.join(args.out, pfx + "_padded.fastq.gz"),'wb')
ct=0
        for line in fh:
            line = line.strip()
            ct += 1
            if ct % 4 == 2:
                # sequence line: pad short reads with 'N' before trimming
                if len(line) < l:
                    line = line + ('N' * (l - len(line)))
                line = line[:l]
            elif ct % 4 == 0:
                # quality line: pad with '#' before trimming
                if len(line) < l:
                    line = line + ('#' * (l - len(line)))
                line = line[:l]
            # header and '+' lines pass through; every line is written
            # back with its newline so the output stays a valid fastq
            out.write(line + '\n')
fh.close()
out.close()
|
Create script to pad and trim fastq reads to one length#!/usr/bin/python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__= 'Allison MacLeay'
import sys
import os
import argparse
import glob
import gzip
#-----------------------------------------
# MAIN
# pad and trim every fastq file in a directory
# to a single read length
#-----------------------------------------
if __name__ == '__main__':
    parser=argparse.ArgumentParser(description="Pad and trim fastq reads in a directory to a single length.")
parser.add_argument('--dir', default='.', help='directory containing fastq output')
parser.add_argument('--out', default='seq_pad_out', help='directory for output')
parser.add_argument('--len', default=147, help='length to trim and pad to')
args=parser.parse_args()
l=int(args.len)
os.system("mkdir -p " + args.out)
files = glob.glob(os.path.join(args.dir,"*.fastq.gz"))
for f in files:
pfx = f.split('.')[0].split('/')[-1]
fh = gzip.open(f,'r')
out = gzip.open(os.path.join(args.out, pfx + "_padded.fastq.gz"),'wb')
ct=0
        for line in fh:
            line = line.strip()
            ct += 1
            if ct % 4 == 2:
                # sequence line: pad short reads with 'N' before trimming
                if len(line) < l:
                    line = line + ('N' * (l - len(line)))
                line = line[:l]
            elif ct % 4 == 0:
                # quality line: pad with '#' before trimming
                if len(line) < l:
                    line = line + ('#' * (l - len(line)))
                line = line[:l]
            # header and '+' lines pass through; every line is written
            # back with its newline so the output stays a valid fastq
            out.write(line + '\n')
fh.close()
out.close()
|
<commit_before><commit_msg>Create script to pad and trim fastq reads to one length<commit_after>#!/usr/bin/python
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__author__= 'Allison MacLeay'
import sys
import os
import argparse
import glob
import gzip
#-----------------------------------------
# MAIN
# pad and trim every fastq file in a directory
# to a single read length
#-----------------------------------------
if __name__ == '__main__':
    parser=argparse.ArgumentParser(description="Pad and trim fastq reads in a directory to a single length.")
parser.add_argument('--dir', default='.', help='directory containing fastq output')
parser.add_argument('--out', default='seq_pad_out', help='directory for output')
parser.add_argument('--len', default=147, help='length to trim and pad to')
args=parser.parse_args()
l=int(args.len)
os.system("mkdir -p " + args.out)
files = glob.glob(os.path.join(args.dir,"*.fastq.gz"))
for f in files:
pfx = f.split('.')[0].split('/')[-1]
fh = gzip.open(f,'r')
out = gzip.open(os.path.join(args.out, pfx + "_padded.fastq.gz"),'wb')
ct=0
        for line in fh:
            line = line.strip()
            ct += 1
            if ct % 4 == 2:
                # sequence line: pad short reads with 'N' before trimming
                if len(line) < l:
                    line = line + ('N' * (l - len(line)))
                line = line[:l]
            elif ct % 4 == 0:
                # quality line: pad with '#' before trimming
                if len(line) < l:
                    line = line + ('#' * (l - len(line)))
                line = line[:l]
            # header and '+' lines pass through; every line is written
            # back with its newline so the output stays a valid fastq
            out.write(line + '\n')
fh.close()
out.close()
|
|
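A typical invocation of the padding script above, with illustrative paths; every *.fastq.gz under --dir yields a <prefix>_padded.fastq.gz under --out:

python seq_pad.py --dir ./fastqs --out ./padded --len 147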
d25af87006ac21f55706c3a5579aec3c961b88e8
|
download_mdcs_data.py
|
download_mdcs_data.py
|
"""Fetch images from MDCS.
"""
import json
import requests
import xmltodict
def download_mdcs_data():
user = "dwheeler"
password = "12345"
mdcs_url = "http://129.6.153.123:8000"
schema_title = 'SemImage'
url = mdcs_url + "/rest/templates/select/all"
allSchemas = json.loads(requests.get(url, auth=(user, password)).text)
schemaIDs = [schema['id'] for schema in allSchemas if schema['title'] == schema_title]
url = mdcs_url + "/rest/explore/query-by-example"
query = {"schema" : schemaIDs[0]}
req_data = {"query" : json.dumps(query)}
qres = json.loads(requests.post(url, req_data, auth=(user, password)).text)
imgfile = [data['title'] for data in qres]
img_urls = [xmltodict.parse(data['content'])['semImage']['imageFile'] for data in qres]
# for i in range(len(qres)):
# imgfile.append(qres[i]['title'])
# content = qres[i]['content']
# # qdata = DMD.DataModelDict(content)
# content_dict = xmltodict.parse(content)
# # img_urls.append(qdata.find('imageFile'))
# img_urls.append(content_dict['semImage']['imageFile'])
print("no_images: ",len(img_urls))
print()
print(imgfile)
print()
print(img_urls)
if __name__ == '__main__':
download_mdcs_data()
|
Add script to download data from MDCS
|
Add script to download data from MDCS
|
Python
|
mit
|
wd15/sem-image-stats
|
Add script to download data from MDCS
|
"""Fetch images from MDCS.
"""
import json
import requests
import xmltodict
def download_mdcs_data():
user = "dwheeler"
password = "12345"
mdcs_url = "http://129.6.153.123:8000"
schema_title = 'SemImage'
url = mdcs_url + "/rest/templates/select/all"
allSchemas = json.loads(requests.get(url, auth=(user, password)).text)
schemaIDs = [schema['id'] for schema in allSchemas if schema['title'] == schema_title]
url = mdcs_url + "/rest/explore/query-by-example"
query = {"schema" : schemaIDs[0]}
req_data = {"query" : json.dumps(query)}
qres = json.loads(requests.post(url, req_data, auth=(user, password)).text)
imgfile = [data['title'] for data in qres]
img_urls = [xmltodict.parse(data['content'])['semImage']['imageFile'] for data in qres]
# for i in range(len(qres)):
# imgfile.append(qres[i]['title'])
# content = qres[i]['content']
# # qdata = DMD.DataModelDict(content)
# content_dict = xmltodict.parse(content)
# # img_urls.append(qdata.find('imageFile'))
# img_urls.append(content_dict['semImage']['imageFile'])
print("no_images: ",len(img_urls))
print()
print(imgfile)
print()
print(img_urls)
if __name__ == '__main__':
download_mdcs_data()
|
<commit_before><commit_msg>Add script to download data from MDCS<commit_after>
|
"""Fetch images from MDCS.
"""
import json
import requests
import xmltodict
def download_mdcs_data():
user = "dwheeler"
password = "12345"
mdcs_url = "http://129.6.153.123:8000"
schema_title = 'SemImage'
url = mdcs_url + "/rest/templates/select/all"
allSchemas = json.loads(requests.get(url, auth=(user, password)).text)
schemaIDs = [schema['id'] for schema in allSchemas if schema['title'] == schema_title]
url = mdcs_url + "/rest/explore/query-by-example"
query = {"schema" : schemaIDs[0]}
req_data = {"query" : json.dumps(query)}
qres = json.loads(requests.post(url, req_data, auth=(user, password)).text)
imgfile = [data['title'] for data in qres]
img_urls = [xmltodict.parse(data['content'])['semImage']['imageFile'] for data in qres]
# for i in range(len(qres)):
# imgfile.append(qres[i]['title'])
# content = qres[i]['content']
# # qdata = DMD.DataModelDict(content)
# content_dict = xmltodict.parse(content)
# # img_urls.append(qdata.find('imageFile'))
# img_urls.append(content_dict['semImage']['imageFile'])
print("no_images: ",len(img_urls))
print()
print(imgfile)
print()
print(img_urls)
if __name__ == '__main__':
download_mdcs_data()
|
Add script to download data from MDCS"""Fetch images from MDCS.
"""
import json
import requests
import xmltodict
def download_mdcs_data():
user = "dwheeler"
password = "12345"
mdcs_url = "http://129.6.153.123:8000"
schema_title = 'SemImage'
url = mdcs_url + "/rest/templates/select/all"
allSchemas = json.loads(requests.get(url, auth=(user, password)).text)
schemaIDs = [schema['id'] for schema in allSchemas if schema['title'] == schema_title]
url = mdcs_url + "/rest/explore/query-by-example"
query = {"schema" : schemaIDs[0]}
req_data = {"query" : json.dumps(query)}
qres = json.loads(requests.post(url, req_data, auth=(user, password)).text)
imgfile = [data['title'] for data in qres]
img_urls = [xmltodict.parse(data['content'])['semImage']['imageFile'] for data in qres]
# for i in range(len(qres)):
# imgfile.append(qres[i]['title'])
# content = qres[i]['content']
# # qdata = DMD.DataModelDict(content)
# content_dict = xmltodict.parse(content)
# # img_urls.append(qdata.find('imageFile'))
# img_urls.append(content_dict['semImage']['imageFile'])
print("no_images: ",len(img_urls))
print()
print(imgfile)
print()
print(img_urls)
if __name__ == '__main__':
download_mdcs_data()
|
<commit_before><commit_msg>Add script to download data from MDCS<commit_after>"""Fetch images from MDCS.
"""
import json
import requests
import xmltodict
def download_mdcs_data():
user = "dwheeler"
password = "12345"
mdcs_url = "http://129.6.153.123:8000"
schema_title = 'SemImage'
url = mdcs_url + "/rest/templates/select/all"
allSchemas = json.loads(requests.get(url, auth=(user, password)).text)
schemaIDs = [schema['id'] for schema in allSchemas if schema['title'] == schema_title]
url = mdcs_url + "/rest/explore/query-by-example"
query = {"schema" : schemaIDs[0]}
req_data = {"query" : json.dumps(query)}
qres = json.loads(requests.post(url, req_data, auth=(user, password)).text)
imgfile = [data['title'] for data in qres]
img_urls = [xmltodict.parse(data['content'])['semImage']['imageFile'] for data in qres]
# for i in range(len(qres)):
# imgfile.append(qres[i]['title'])
# content = qres[i]['content']
# # qdata = DMD.DataModelDict(content)
# content_dict = xmltodict.parse(content)
# # img_urls.append(qdata.find('imageFile'))
# img_urls.append(content_dict['semImage']['imageFile'])
print("no_images: ",len(img_urls))
print()
print(imgfile)
print()
print(img_urls)
if __name__ == '__main__':
download_mdcs_data()
|
|
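The MDCS script stops at printing the image URLs. A follow-up sketch for fetching them, assuming the URLs are plain HTTP resources reachable with the same credentials (the commit does not confirm this), could look like:

import os
import requests

def fetch_images(img_urls, user, password, dest='images'):
    # hypothetical helper, not part of the commit
    if not os.path.isdir(dest):
        os.makedirs(dest)
    for url in img_urls:
        r = requests.get(url, auth=(user, password))
        r.raise_for_status()
        with open(os.path.join(dest, os.path.basename(url)), 'wb') as f:
            f.write(r.content)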
b35e780364ca2d06902302b165ce2261ec6795a1
|
ona_migration_script/test_migrate_toilet_codes.py
|
ona_migration_script/test_migrate_toilet_codes.py
|
import json
import requests
from requests_testadapter import TestAdapter
import unittest
import migrate_toilet_codes
class TestCreateSession(unittest.TestCase):
def test_create_session(self):
username = 'testuser'
password = 'testpass'
s = migrate_toilet_codes.create_session(username, password)
self.assertTrue(isinstance(s, requests.Session))
self.assertEqual(
s.headers['Content-type'], "application/json; charset=utf-8")
self.assertEqual(s.auth, (username, password))
class TestGetAllToilets(unittest.TestCase):
def test_get_list_of_toilets(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
return_data = [
{
"id": 94,
"code": "RR007094FT",
"lat": -34.01691,
"lon": 18.66339,
"section": "RR",
"section_number": "94",
"cluster": "7",
"toilet_type": "FT"
},
{
"id": 1,
"code": "RR001001FT",
"lat": -34.01667,
"lon": 18.66404,
"section": "RR",
"section_number": "1",
"cluster": "1",
"toilet_type": "FT"
}
]
s.mount(url, TestAdapter(json.dumps(return_data)))
toilets = migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(return_data, toilets)
def test_http_errors_raised(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
s.mount(url, TestAdapter('', status=404))
with self.assertRaises(requests.HTTPError) as e:
migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(e.exception.response.status_code, 404)
|
Add tests for getting all toilets
|
Add tests for getting all toilets
|
Python
|
bsd-3-clause
|
praekelt/go-imali-yethu-js,praekelt/go-imali-yethu-js,praekelt/go-imali-yethu-js
|
Add tests for getting all toilets
|
import json
import requests
from requests_testadapter import TestAdapter
import unittest
import migrate_toilet_codes
class TestCreateSession(unittest.TestCase):
def test_create_session(self):
username = 'testuser'
password = 'testpass'
s = migrate_toilet_codes.create_session(username, password)
self.assertTrue(isinstance(s, requests.Session))
self.assertEqual(
s.headers['Content-type'], "application/json; charset=utf-8")
self.assertEqual(s.auth, (username, password))
class TestGetAllToilets(unittest.TestCase):
def test_get_list_of_toilets(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
return_data = [
{
"id": 94,
"code": "RR007094FT",
"lat": -34.01691,
"lon": 18.66339,
"section": "RR",
"section_number": "94",
"cluster": "7",
"toilet_type": "FT"
},
{
"id": 1,
"code": "RR001001FT",
"lat": -34.01667,
"lon": 18.66404,
"section": "RR",
"section_number": "1",
"cluster": "1",
"toilet_type": "FT"
}
]
s.mount(url, TestAdapter(json.dumps(return_data)))
toilets = migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(return_data, toilets)
def test_http_errors_raised(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
s.mount(url, TestAdapter('', status=404))
with self.assertRaises(requests.HTTPError) as e:
migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(e.exception.response.status_code, 404)
|
<commit_before><commit_msg>Add tests for getting all toilets<commit_after>
|
import json
import requests
from requests_testadapter import TestAdapter
import unittest
import migrate_toilet_codes
class TestCreateSession(unittest.TestCase):
def test_create_session(self):
username = 'testuser'
password = 'testpass'
s = migrate_toilet_codes.create_session(username, password)
self.assertTrue(isinstance(s, requests.Session))
self.assertEqual(
s.headers['Content-type'], "application/json; charset=utf-8")
self.assertEqual(s.auth, (username, password))
class TestGetAllToilets(unittest.TestCase):
def test_get_list_of_toilets(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
return_data = [
{
"id": 94,
"code": "RR007094FT",
"lat": -34.01691,
"lon": 18.66339,
"section": "RR",
"section_number": "94",
"cluster": "7",
"toilet_type": "FT"
},
{
"id": 1,
"code": "RR001001FT",
"lat": -34.01667,
"lon": 18.66404,
"section": "RR",
"section_number": "1",
"cluster": "1",
"toilet_type": "FT"
}
]
s.mount(url, TestAdapter(json.dumps(return_data)))
toilets = migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(return_data, toilets)
def test_http_errors_raised(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
s.mount(url, TestAdapter('', status=404))
with self.assertRaises(requests.HTTPError) as e:
migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(e.exception.response.status_code, 404)
|
Add tests for getting all toiletsimport json
import requests
from requests_testadapter import TestAdapter
import unittest
import migrate_toilet_codes
class TestCreateSession(unittest.TestCase):
def test_create_session(self):
username = 'testuser'
password = 'testpass'
s = migrate_toilet_codes.create_session(username, password)
self.assertTrue(isinstance(s, requests.Session))
self.assertEqual(
s.headers['Content-type'], "application/json; charset=utf-8")
self.assertEqual(s.auth, (username, password))
class TestGetAllToilets(unittest.TestCase):
def test_get_list_of_toilets(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
return_data = [
{
"id": 94,
"code": "RR007094FT",
"lat": -34.01691,
"lon": 18.66339,
"section": "RR",
"section_number": "94",
"cluster": "7",
"toilet_type": "FT"
},
{
"id": 1,
"code": "RR001001FT",
"lat": -34.01667,
"lon": 18.66404,
"section": "RR",
"section_number": "1",
"cluster": "1",
"toilet_type": "FT"
}
]
s.mount(url, TestAdapter(json.dumps(return_data)))
toilets = migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(return_data, toilets)
def test_http_errors_raised(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
s.mount(url, TestAdapter('', status=404))
with self.assertRaises(requests.HTTPError) as e:
migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(e.exception.response.status_code, 404)
|
<commit_before><commit_msg>Add tests for getting all toilets<commit_after>import json
import requests
from requests_testadapter import TestAdapter
import unittest
import migrate_toilet_codes
class TestCreateSession(unittest.TestCase):
def test_create_session(self):
username = 'testuser'
password = 'testpass'
s = migrate_toilet_codes.create_session(username, password)
self.assertTrue(isinstance(s, requests.Session))
self.assertEqual(
s.headers['Content-type'], "application/json; charset=utf-8")
self.assertEqual(s.auth, (username, password))
class TestGetAllToilets(unittest.TestCase):
def test_get_list_of_toilets(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
return_data = [
{
"id": 94,
"code": "RR007094FT",
"lat": -34.01691,
"lon": 18.66339,
"section": "RR",
"section_number": "94",
"cluster": "7",
"toilet_type": "FT"
},
{
"id": 1,
"code": "RR001001FT",
"lat": -34.01667,
"lon": 18.66404,
"section": "RR",
"section_number": "1",
"cluster": "1",
"toilet_type": "FT"
}
]
s.mount(url, TestAdapter(json.dumps(return_data)))
toilets = migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(return_data, toilets)
def test_http_errors_raised(self):
s = requests.Session()
url = 'http://www.example.org/toilet_codes/'
s.mount(url, TestAdapter('', status=404))
with self.assertRaises(requests.HTTPError) as e:
migrate_toilet_codes.get_all_toilets(s, url)
self.assertEqual(e.exception.response.status_code, 404)
|
|
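The tests above pin down the expected behaviour of migrate_toilet_codes; a minimal implementation consistent with the assertions (an inference, not the project's actual code) would be:

import requests

def create_session(username, password):
    # headers and auth exactly as the first test asserts
    s = requests.Session()
    s.headers['Content-type'] = "application/json; charset=utf-8"
    s.auth = (username, password)
    return s

def get_all_toilets(session, url):
    response = session.get(url)
    response.raise_for_status()  # surfaces the 404 the second test expects
    return response.json()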
d782809746cfb403358bdfb10215b70c96498264
|
QtViewer.py
|
QtViewer.py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Qt interface to display AVT cameras
#
#
# External dependencies
#
from PyQt4 import QtGui, QtCore
#
# Window to display a camera
#
class QtViewer( QtGui.QWidget ) :
#
# Initialisation
#
def __init__( self, camera ) :
# Initialize parent class
QtGui.QWidget.__init__( self )
# Backup the camera
self.camera = camera
# Set the window title
self.setWindowTitle( 'Camera' )
# Create a label to display camera images
self.image_label = QtGui.QLabel( self )
self.image_label.setScaledContents( True )
# Create a dummy image to fill the label
        image = QtGui.QImage( 100, 100, QtGui.QImage.Format_RGB32 )
# Create a layout
layout = QtGui.QVBoxLayout( self )
layout.addWidget( self.image_label )
        self.image_label.setPixmap( QtGui.QPixmap.fromImage( image ) )
self.setLayout( layout )
self.setGeometry( 100, 100, 200, 200 )
self.show()
|
Introduce a live camera viewer with Qt.
|
Introduce a live camera viewer with Qt.
|
Python
|
mit
|
microy/PyStereoVisionToolkit,microy/VisionToolkit,microy/VisionToolkit,microy/StereoVision,microy/StereoVision,microy/PyStereoVisionToolkit
|
Introduce a live camera viewer with Qt.
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Qt interface to display AVT cameras
#
#
# External dependencies
#
from PyQt4 import QtGui, QtCore
#
# Window to display a camera
#
class QtViewer( QtGui.QWidget ) :
#
# Initialisation
#
def __init__( self, camera ) :
# Initialize parent class
QtGui.QWidget.__init__( self )
# Backup the camera
self.camera = camera
# Set the window title
self.setWindowTitle( 'Camera' )
# Create a label to display camera images
self.image_label = QtGui.QLabel( self )
self.image_label.setScaledContents( True )
# Create a dummy image to fill the label
        image = QtGui.QImage( 100, 100, QtGui.QImage.Format_RGB32 )
# Create a layout
layout = QtGui.QVBoxLayout( self )
layout.addWidget( self.image_label )
        self.image_label.setPixmap( QtGui.QPixmap.fromImage( image ) )
self.setLayout( layout )
self.setGeometry( 100, 100, 200, 200 )
self.show()
|
<commit_before><commit_msg>Introduce a live camera viewer with Qt.<commit_after>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Qt interface to display AVT cameras
#
#
# External dependencies
#
from PyQt4 import QtGui, QtCore
#
# Window to display a camera
#
class QtViewer( QtGui.QWidget ) :
#
# Initialisation
#
def __init__( self, camera ) :
# Initialize parent class
QtGui.QWidget.__init__( self )
# Backup the camera
self.camera = camera
# Set the window title
self.setWindowTitle( 'Camera' )
# Create a label to display camera images
self.image_label = QtGui.QLabel( self )
self.image_label.setScaledContents( True )
# Create a dummy image to fill the label
        image = QtGui.QImage( 100, 100, QtGui.QImage.Format_RGB32 )
# Create a layout
layout = QtGui.QVBoxLayout( self )
layout.addWidget( self.image_label )
        self.image_label.setPixmap( QtGui.QPixmap.fromImage( image ) )
self.setLayout( layout )
self.setGeometry( 100, 100, 200, 200 )
self.show()
|
Introduce a live camera viewer with Qt.#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Qt interface to display AVT cameras
#
#
# External dependencies
#
from PyQt4 import QtGui, QtCore
#
# Window to display a camera
#
class QtViewer( QtGui.QWidget ) :
#
# Initialisation
#
def __init__( self, camera ) :
# Initialize parent class
QtGui.QWidget.__init__( self )
# Backup the camera
self.camera = camera
# Set the window title
self.setWindowTitle( 'Camera' )
# Create a label to display camera images
self.image_label = QtGui.QLabel( self )
self.image_label.setScaledContents( True )
# Create a dummy image to fill the label
        image = QtGui.QImage( 100, 100, QtGui.QImage.Format_RGB32 )
# Create a layout
layout = QtGui.QVBoxLayout( self )
layout.addWidget( self.image_label )
        self.image_label.setPixmap( QtGui.QPixmap.fromImage( image ) )
self.setLayout( layout )
self.setGeometry( 100, 100, 200, 200 )
self.show()
|
<commit_before><commit_msg>Introduce a live camera viewer with Qt.<commit_after>#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Qt interface to display AVT cameras
#
#
# External dependencies
#
from PyQt4 import QtGui, QtCore
#
# Window to display a camera
#
class QtViewer( QtGui.QWidget ) :
#
# Initialisation
#
def __init__( self, camera ) :
# Initialize parent class
QtGui.QWidget.__init__( self )
# Backup the camera
self.camera = camera
# Set the window title
self.setWindowTitle( 'Camera' )
# Create a label to display camera images
self.image_label = QtGui.QLabel( self )
self.image_label.setScaledContents( True )
# Create a dummy image to fill the label
        image = QtGui.QImage( 100, 100, QtGui.QImage.Format_RGB32 )
# Create a layout
layout = QtGui.QVBoxLayout( self )
layout.addWidget( self.image_label )
        self.image_label.setPixmap( QtGui.QPixmap.fromImage( image ) )
self.setLayout( layout )
self.setGeometry( 100, 100, 200, 200 )
self.show()
|
|
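The widget has no entry point yet; a minimal launcher sketch, appended to QtViewer.py so that QtGui and QtViewer are already in scope, might look like this (camera=None is a placeholder, since the class only stores the object at this stage):

import sys

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    viewer = QtViewer(camera=None)  # placeholder until capture is wired up
    sys.exit(app.exec_())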
02d7e423416ab90bdc4db6428c51efaf6f33a4c6
|
dbaas/dbaas/templatetags/settings_tags.py
|
dbaas/dbaas/templatetags/settings_tags.py
|
from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag()
def setting(var_name):
"""
Get a var from settings
"""
return getattr(settings, var_name)
|
Create templatetag to put a settings var into context
|
Create templatetag to put a settings var into context
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Create templatetag to put a settings var into context
|
from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag()
def setting(var_name):
"""
Get a var from settings
"""
return getattr(settings, var_name)
|
<commit_before><commit_msg>Create templatetag to put a settings var into context<commit_after>
|
from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag()
def setting(var_name):
"""
Get a var from settings
"""
return getattr(settings, var_name)
|
Create templatetag to put a settings var into contextfrom django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag()
def setting(var_name):
"""
Get a var from settings
"""
return getattr(settings, var_name)
|
<commit_before><commit_msg>Create templatetag to put a settings var into context<commit_after>from django import template
from django.conf import settings
register = template.Library()
@register.assignment_tag()
def setting(var_name):
"""
Get a var from settings
"""
return getattr(settings, var_name)
|
|
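In a template, the assignment tag reads a settings value into a context variable; DEBUG below is just an example name:

{% load settings_tags %}
{% setting "DEBUG" as debug %}
{% if debug %}<p>Debug mode is on.</p>{% endif %}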
2c651b7083ec368ebf226364a1a1aba5f5ec147e
|
fjord/feedback/migrations/0003_auto__chg_field_simple_created.py
|
fjord/feedback/migrations/0003_auto__chg_field_simple_created.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
models = {
'feedback.simple': {
'Meta': {'ordering': "['-created']", 'object_name': 'Simple'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['feedback']
|
Add migration for datetime change.
|
Add migration for datetime change.
This should have been done when we changed this in the model, but it
wasn't.
|
Python
|
bsd-3-clause
|
hoosteeno/fjord,DESHRAJ/fjord,staranjeet/fjord,hoosteeno/fjord,lgp171188/fjord,DESHRAJ/fjord,hoosteeno/fjord,Ritsyy/fjord,staranjeet/fjord,lgp171188/fjord,rlr/fjord,rlr/fjord,staranjeet/fjord,hoosteeno/fjord,lgp171188/fjord,staranjeet/fjord,rlr/fjord,mozilla/fjord,mozilla/fjord,lgp171188/fjord,Ritsyy/fjord,Ritsyy/fjord,rlr/fjord,DESHRAJ/fjord,mozilla/fjord,mozilla/fjord,Ritsyy/fjord
|
Add migration for datetime change.
This should have been done when we changed this in the model, but it
wasn't.
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
models = {
'feedback.simple': {
'Meta': {'ordering': "['-created']", 'object_name': 'Simple'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['feedback']
|
<commit_before><commit_msg>Add migration for datetime change.
This should have been done when we changed this in the model, but it
wasn't.<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
models = {
'feedback.simple': {
'Meta': {'ordering': "['-created']", 'object_name': 'Simple'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['feedback']
|
Add migration for datetime change.
This should have been done when we changed this in the model, but it
wasn't.# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
models = {
'feedback.simple': {
'Meta': {'ordering': "['-created']", 'object_name': 'Simple'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['feedback']
|
<commit_before><commit_msg>Add migration for datetime change.
This should have been done when we changed this in the model, but it
wasn't.<commit_after># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'Simple.created'
db.alter_column('feedback_simple', 'created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
models = {
'feedback.simple': {
'Meta': {'ordering': "['-created']", 'object_name': 'Simple'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'prodchan': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['feedback']
|
|
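With South (pre-Django-1.7), the migration above is applied and reverted with the usual commands:

./manage.py migrate feedback          # forwards to 0003
./manage.py migrate feedback 0002     # backwards to the previous migration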
635ed78543b3e3e8fe7c52fa91ee8516d617249d
|
data-travel/test_client.py
|
data-travel/test_client.py
|
from socket import *
host = '127.0.0.1'
port = 1234
bufsize = 1024
addr = (host, port)
client = socket(AF_INET, SOCK_STREAM)
client.connect(addr)
while True:
data = client.recv(bufsize)
if not data:
break
print data.strip()
client.close()
|
Add client sample for test - Connect to server - Receive data from server - Strip the data and print
|
Add client sample for test
- Connect to server
- Receive data from server
- Strip the data and print
|
Python
|
apache-2.0
|
peitaosu/motion-tools
|
Add client sample for test
- Connect to server
- Receive data from server
- Strip the data and print
|
from socket import *
host = '127.0.0.1'
port = 1234
bufsize = 1024
addr = (host, port)
client = socket(AF_INET, SOCK_STREAM)
client.connect(addr)
while True:
data = client.recv(bufsize)
if not data:
break
print data.strip()
client.close()
|
<commit_before><commit_msg>Add client sample for test
- Connect to server
- Receive data from server
- Strip the data and print<commit_after>
|
from socket import *
host = '127.0.0.1'
port = 1234
bufsize = 1024
addr = (host, port)
client = socket(AF_INET, SOCK_STREAM)
client.connect(addr)
while True:
data = client.recv(bufsize)
if not data:
break
print data.strip()
client.close()
|
Add client sample for test
- Connect to server
- Receive data from server
- Strip the data and printfrom socket import *
host = '127.0.0.1'
port = 1234
bufsize = 1024
addr = (host, port)
client = socket(AF_INET, SOCK_STREAM)
client.connect(addr)
while True:
data = client.recv(bufsize)
if not data:
break
print data.strip()
client.close()
|
<commit_before><commit_msg>Add client sample for test
- Connect to server
- Receive data from server
- Strip the data and print<commit_after>from socket import *
host = '127.0.0.1'
port = 1234
bufsize = 1024
addr = (host, port)
client = socket(AF_INET, SOCK_STREAM)
client.connect(addr)
while True:
data = client.recv(bufsize)
if not data:
break
print data.strip()
client.close()
|
|
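A matching server for this client, a sketch for local testing only and not part of the commit, binds the same address and pushes a few bytes (Python 2, like the client):

from socket import *

host, port = '127.0.0.1', 1234
server = socket(AF_INET, SOCK_STREAM)
server.bind((host, port))
server.listen(1)
conn, addr = server.accept()
conn.send('hello from the test server\n')  # the client strips and prints this
conn.close()
server.close()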
313e9cae068192fe11ad10ea0b5c05061b0e5c60
|
tests/unit/cloud/clouds/ec2_test.py
|
tests/unit/cloud/clouds/ec2_test.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def test__validate_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name
os.chmod(key_file, 0o644)
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
os.chmod(key_file, 0o600)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
os.chmod(key_file, 0o400)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
# tmp file removed
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
if __name__ == '__main__':
from unit import run_tests
run_tests(EC2TestCase, needs_daemon=False)
|
Add unit test for _validate_key_file_permissions in ec2 module
|
Add unit test for _validate_key_file_permissions in ec2 module
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit test for _validate_key_file_permissions in ec2 module
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def test__validate_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name
os.chmod(key_file, 0o644)
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
os.chmod(key_file, 0o600)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
os.chmod(key_file, 0o400)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
# tmp file removed
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
if __name__ == '__main__':
from unit import run_tests
run_tests(EC2TestCase, needs_daemon=False)
|
<commit_before><commit_msg>Add unit test for _validate_key_file_permissions in ec2 module<commit_after>
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def test__validate_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name
os.chmod(key_file, 0o644)
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
os.chmod(key_file, 0o600)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
os.chmod(key_file, 0o400)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
# tmp file removed
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
if __name__ == '__main__':
from unit import run_tests
run_tests(EC2TestCase, needs_daemon=False)
|
Add unit test for _validate_key_file_permissions in ec2 module# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def test__validate_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name
os.chmod(key_file, 0o644)
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
os.chmod(key_file, 0o600)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
os.chmod(key_file, 0o400)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
# tmp file removed
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
if __name__ == '__main__':
from unit import run_tests
run_tests(EC2TestCase, needs_daemon=False)
|
<commit_before><commit_msg>Add unit test for _validate_key_file_permissions in ec2 module<commit_after># -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Libs
from salt.cloud.clouds import ec2
from salt.exceptions import SaltCloudSystemExit
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import MagicMock, NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
@skipIf(NO_MOCK, NO_MOCK_REASON)
class EC2TestCase(TestCase):
'''
Unit TestCase for salt.cloud.clouds.ec2 module.
'''
def test__validate_key_path_and_mode(self):
with tempfile.NamedTemporaryFile() as f:
key_file = f.name
os.chmod(key_file, 0o644)
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
os.chmod(key_file, 0o600)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
os.chmod(key_file, 0o400)
self.assertTrue(ec2._validate_key_path_and_mode(key_file))
# tmp file removed
self.assertRaises(SaltCloudSystemExit,
ec2._validate_key_path_and_mode,
key_file)
if __name__ == '__main__':
from unit import run_tests
run_tests(EC2TestCase, needs_daemon=False)
|
|
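For orientation, the behaviour the test pins down (accept only 0400/0600 permissions, fail loudly on anything else or on a missing file) can be sketched as below; the real salt implementation may differ in wording and logging:

import os
import stat

from salt.exceptions import SaltCloudSystemExit

def _validate_key_path_and_mode(key_file):
    # sketch inferred from the test, not salt's actual source
    if not os.path.exists(key_file):
        raise SaltCloudSystemExit('Key file {0} does not exist.'.format(key_file))
    key_mode = stat.S_IMODE(os.stat(key_file).st_mode)
    if key_mode not in (0o400, 0o600):
        raise SaltCloudSystemExit('Key file {0} must be mode 0400 or 0600.'.format(key_file))
    return True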
0202f0bd4358d68917af19910bb4e1f0a3dda602
|
scripts/avalon-usb2iic-test.py
|
scripts/avalon-usb2iic-test.py
|
#!/usr/bin/env python2.7
# This script aims to run a loopback test on CDC or HID.
# The statistics are for rough comparison only; they are not accurate.
from serial import Serial
from optparse import OptionParser
import time
import binascii
import usb.core
import usb.util
import sys
parser = OptionParser()
parser.add_option("-M", "--Mode", dest="run_mode", default="1", help="Run Mode:0-CDC,1-HID; default:1")
(options, args) = parser.parse_args()
LOOP_CNT = 1
def statics(run_mode):
tmp_dat = ""
raw_dat = ""
start = time.time()
for i in range(62):
tmp_dat += '{:02}'.format(i)
for i in range(0, LOOP_CNT):
raw_dat = tmp_dat + '{:02}'.format(64 - (i % 64))
if run_mode == '0':
ser.write(raw_dat.decode('hex'))
res_s = ser.read(64)
else:
hiddev.write(endpout, raw_dat.decode('hex'))
res_s = hiddev.read(endpin, 64, 5000)
if raw_dat != binascii.hexlify(res_s):
print "Failed:" + str(i)
print "TX:" + raw_dat
print "RX:" + binascii.hexlify(res_s)
print "STATICS Begin"
print " Run %s times" %LOOP_CNT
print " Time elapsed: %s" %(time.time() - start)
print "STATICS End"
def enum_usbhid(vendor_id, product_id):
# Find device
hiddev = usb.core.find(idVendor = vendor_id, idProduct = product_id)
if not hiddev:
sys.exit("No Avalon hid dev can be found!")
else:
print "Find an Avalon hid dev"
if hiddev.is_kernel_driver_active(0):
try:
hiddev.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: %s" % str(e))
try:
hiddev.set_configuration()
hiddev.reset()
for endp in hiddev[0][(0,0)]:
if endp.bEndpointAddress & 0x80:
endpin = endp.bEndpointAddress
else:
endpout = endp.bEndpointAddress
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
return hiddev, endpin, endpout
if __name__ == '__main__':
if options.run_mode == '0':
ser = Serial("/dev/ttyACM0", 115200, 8, timeout=1)
else:
hid_vid = 0x1fc9
hid_pid = 0x0081
hiddev, endpin, endpout = enum_usbhid(hid_vid, hid_pid)
statics(options.run_mode)
|
Add usb bridge test (CDC & HID)
|
Add usb bridge test (CDC & HID)
|
Python
|
unlicense
|
archangdcc/avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,Canaan-Creative/Avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,archangdcc/avalon-extras
|
Add usb bridge test (CDC & HID)
|
#!/usr/bin/env python2.7
# This script aims to run a loopback test on CDC or HID.
# The statistics are for rough comparison only; they are not accurate.
from serial import Serial
from optparse import OptionParser
import time
import binascii
import usb.core
import usb.util
import sys
parser = OptionParser()
parser.add_option("-M", "--Mode", dest="run_mode", default="1", help="Run Mode:0-CDC,1-HID; default:1")
(options, args) = parser.parse_args()
LOOP_CNT = 1
def statics(run_mode):
tmp_dat = ""
raw_dat = ""
start = time.time()
for i in range(62):
tmp_dat += '{:02}'.format(i)
for i in range(0, LOOP_CNT):
raw_dat = tmp_dat + '{:02}'.format(64 - (i % 64))
if run_mode == '0':
ser.write(raw_dat.decode('hex'))
res_s = ser.read(64)
else:
hiddev.write(endpout, raw_dat.decode('hex'))
res_s = hiddev.read(endpin, 64, 5000)
if raw_dat != binascii.hexlify(res_s):
print "Failed:" + str(i)
print "TX:" + raw_dat
print "RX:" + binascii.hexlify(res_s)
print "STATICS Begin"
print " Run %s times" %LOOP_CNT
print " Time elapsed: %s" %(time.time() - start)
print "STATICS End"
def enum_usbhid(vendor_id, product_id):
# Find device
hiddev = usb.core.find(idVendor = vendor_id, idProduct = product_id)
if not hiddev:
sys.exit("No Avalon hid dev can be found!")
else:
print "Find an Avalon hid dev"
if hiddev.is_kernel_driver_active(0):
try:
hiddev.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: %s" % str(e))
try:
hiddev.set_configuration()
hiddev.reset()
for endp in hiddev[0][(0,0)]:
if endp.bEndpointAddress & 0x80:
endpin = endp.bEndpointAddress
else:
endpout = endp.bEndpointAddress
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
return hiddev, endpin, endpout
if __name__ == '__main__':
if options.run_mode == '0':
ser = Serial("/dev/ttyACM0", 115200, 8, timeout=1)
else:
hid_vid = 0x1fc9
hid_pid = 0x0081
hiddev, endpin, endpout = enum_usbhid(hid_vid, hid_pid)
statics(options.run_mode)
|
<commit_before><commit_msg>Add usb bridge test (CDC & HID)<commit_after>
|
#!/usr/bin/env python2.7
# This script aims to run a loopback test on CDC or HID.
# The statistics are for rough comparison only; they are not accurate.
from serial import Serial
from optparse import OptionParser
import time
import binascii
import usb.core
import usb.util
import sys
parser = OptionParser()
parser.add_option("-M", "--Mode", dest="run_mode", default="1", help="Run Mode:0-CDC,1-HID; default:1")
(options, args) = parser.parse_args()
LOOP_CNT = 1
def statics(run_mode):
tmp_dat = ""
raw_dat = ""
start = time.time()
for i in range(62):
tmp_dat += '{:02}'.format(i)
for i in range(0, LOOP_CNT):
raw_dat = tmp_dat + '{:02}'.format(64 - (i % 64))
if run_mode == '0':
ser.write(raw_dat.decode('hex'))
res_s = ser.read(64)
else:
hiddev.write(endpout, raw_dat.decode('hex'))
res_s = hiddev.read(endpin, 64, 5000)
if raw_dat != binascii.hexlify(res_s):
print "Failed:" + str(i)
print "TX:" + raw_dat
print "RX:" + binascii.hexlify(res_s)
print "STATICS Begin"
print " Run %s times" %LOOP_CNT
print " Time elapsed: %s" %(time.time() - start)
print "STATICS End"
def enum_usbhid(vendor_id, product_id):
# Find device
hiddev = usb.core.find(idVendor = vendor_id, idProduct = product_id)
if not hiddev:
sys.exit("No Avalon hid dev can be found!")
else:
print "Find an Avalon hid dev"
if hiddev.is_kernel_driver_active(0):
try:
hiddev.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: %s" % str(e))
try:
hiddev.set_configuration()
hiddev.reset()
for endp in hiddev[0][(0,0)]:
if endp.bEndpointAddress & 0x80:
endpin = endp.bEndpointAddress
else:
endpout = endp.bEndpointAddress
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
return hiddev, endpin, endpout
if __name__ == '__main__':
if options.run_mode == '0':
ser = Serial("/dev/ttyACM0", 115200, 8, timeout=1)
else:
hid_vid = 0x1fc9
hid_pid = 0x0081
hiddev, endpin, endpout = enum_usbhid(hid_vid, hid_pid)
statics(options.run_mode)
|
Add usb bridge test (CDC & HID)#!/usr/bin/env python2.7
# This script aims to run a loopback test on CDC or HID.
# The statistics are for rough comparison only; they are not accurate.
from serial import Serial
from optparse import OptionParser
import time
import binascii
import usb.core
import usb.util
import sys
parser = OptionParser()
parser.add_option("-M", "--Mode", dest="run_mode", default="1", help="Run Mode:0-CDC,1-HID; default:1")
(options, args) = parser.parse_args()
LOOP_CNT = 1
def statics(run_mode):
tmp_dat = ""
raw_dat = ""
start = time.time()
for i in range(62):
tmp_dat += '{:02}'.format(i)
for i in range(0, LOOP_CNT):
raw_dat = tmp_dat + '{:02}'.format(64 - (i % 64))
if run_mode == '0':
ser.write(raw_dat.decode('hex'))
res_s = ser.read(64)
else:
hiddev.write(endpout, raw_dat.decode('hex'))
res_s = hiddev.read(endpin, 64, 5000)
if raw_dat != binascii.hexlify(res_s):
print "Failed:" + str(i)
print "TX:" + raw_dat
print "RX:" + binascii.hexlify(res_s)
print "STATICS Begin"
print " Run %s times" %LOOP_CNT
print " Time elapsed: %s" %(time.time() - start)
print "STATICS End"
def enum_usbhid(vendor_id, product_id):
# Find device
hiddev = usb.core.find(idVendor = vendor_id, idProduct = product_id)
if not hiddev:
sys.exit("No Avalon hid dev can be found!")
else:
print "Find an Avalon hid dev"
if hiddev.is_kernel_driver_active(0):
try:
hiddev.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: %s" % str(e))
try:
hiddev.set_configuration()
hiddev.reset()
for endp in hiddev[0][(0,0)]:
if endp.bEndpointAddress & 0x80:
endpin = endp.bEndpointAddress
else:
endpout = endp.bEndpointAddress
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
return hiddev, endpin, endpout
if __name__ == '__main__':
if options.run_mode == '0':
ser = Serial("/dev/ttyACM0", 115200, 8, timeout=1)
else:
hid_vid = 0x1fc9
hid_pid = 0x0081
hiddev, endpin, endpout = enum_usbhid(hid_vid, hid_pid)
statics(options.run_mode)
|
<commit_before><commit_msg>Add usb bridge test (CDC & HID)<commit_after>#!/usr/bin/env python2.7
# This script aims to run a loopback test on CDC or HID.
# The statistics are for rough comparison only; they are not accurate.
from serial import Serial
from optparse import OptionParser
import time
import binascii
import usb.core
import usb.util
import sys
parser = OptionParser()
parser.add_option("-M", "--Mode", dest="run_mode", default="1", help="Run Mode:0-CDC,1-HID; default:1")
(options, args) = parser.parse_args()
LOOP_CNT = 1
def statics(run_mode):
tmp_dat = ""
raw_dat = ""
start = time.time()
for i in range(62):
tmp_dat += '{:02}'.format(i)
for i in range(0, LOOP_CNT):
raw_dat = tmp_dat + '{:02}'.format(64 - (i % 64))
if run_mode == '0':
ser.write(raw_dat.decode('hex'))
res_s = ser.read(64)
else:
hiddev.write(endpout, raw_dat.decode('hex'))
res_s = hiddev.read(endpin, 64, 5000)
if raw_dat != binascii.hexlify(res_s):
print "Failed:" + str(i)
print "TX:" + raw_dat
print "RX:" + binascii.hexlify(res_s)
print "STATICS Begin"
print " Run %s times" %LOOP_CNT
print " Time elapsed: %s" %(time.time() - start)
print "STATICS End"
def enum_usbhid(vendor_id, product_id):
# Find device
hiddev = usb.core.find(idVendor = vendor_id, idProduct = product_id)
if not hiddev:
sys.exit("No Avalon hid dev can be found!")
else:
print "Find an Avalon hid dev"
if hiddev.is_kernel_driver_active(0):
try:
hiddev.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("Could not detach kernel driver: %s" % str(e))
try:
hiddev.set_configuration()
hiddev.reset()
for endp in hiddev[0][(0,0)]:
if endp.bEndpointAddress & 0x80:
endpin = endp.bEndpointAddress
else:
endpout = endp.bEndpointAddress
except usb.core.USBError as e:
sys.exit("Could not set configuration: %s" % str(e))
return hiddev, endpin, endpout
if __name__ == '__main__':
if options.run_mode == '0':
ser = Serial("/dev/ttyACM0", 115200, 8, timeout=1)
else:
hid_vid = 0x1fc9
hid_pid = 0x0081
hiddev, endpin, endpout = enum_usbhid(hid_vid, hid_pid)
statics(options.run_mode)
|
|
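Typical invocations of the loopback tool above; the device node and the VID/PID pair are the ones hard-coded in the script:

python2.7 avalon-usb2iic-test.py -M 0    # CDC mode via /dev/ttyACM0
python2.7 avalon-usb2iic-test.py -M 1    # HID mode via pyusb (the default)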
162e7dd6595b0d9303ecb1da66893ee353ba413b
|
tracker.py
|
tracker.py
|
import os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileModifiedEvent
class GamificationHandler(FileSystemEventHandler):
def __init__(self, filename):
FileSystemEventHandler.__init__(self)
self.filename = filename
def on_modified(self, event):
if type(event) == FileModifiedEvent:
if os.path.abspath(self.filename) == event.src_path:
self.do_gamification()
def do_gamification(self):
        # Count words; a context manager ensures the file handle is closed
        num_words = 0
        with open(self.filename) as f:
            for line in f:
                words = line.split(" ")
                for w in words:
                    if w.strip() != "" and w.isalnum():
                        num_words += 1
        logging.info("Total num of words: " + str(num_words))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if len(sys.argv) != 2:
print "Please supply a file to be watched"
sys.exit()
filename = sys.argv[1]
path = os.path.dirname(os.path.abspath(filename))
# Observer setup + start
event_handler = GamificationHandler(filename)
observer = Observer()
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
Add a first small file watcher that counts words
|
Add a first small file watcher that counts words
- Requires installing python watchdog framework
|
Python
|
mit
|
Kadrian/paper-gamification
|
Add a first small file watcher that counts words
- Requires installing python watchdog framework
|
import os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileModifiedEvent
class GamificationHandler(FileSystemEventHandler):
def __init__(self, filename):
FileSystemEventHandler.__init__(self)
self.filename = filename
def on_modified(self, event):
if type(event) == FileModifiedEvent:
if os.path.abspath(self.filename) == event.src_path:
self.do_gamification()
def do_gamification(self):
        # Count words; a context manager ensures the file handle is closed
        num_words = 0
        with open(self.filename) as f:
            for line in f:
                words = line.split(" ")
                for w in words:
                    if w.strip() != "" and w.isalnum():
                        num_words += 1
        logging.info("Total num of words: " + str(num_words))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if len(sys.argv) != 2:
print "Please supply a file to be watched"
sys.exit()
filename = sys.argv[1]
path = os.path.dirname(os.path.abspath(filename))
# Observer setup + start
event_handler = GamificationHandler(filename)
observer = Observer()
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<commit_before><commit_msg>Add a first small file watcher that counts words
- Requires installing python watchdog framework<commit_after>
|
import os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileModifiedEvent
class GamificationHandler(FileSystemEventHandler):
def __init__(self, filename):
FileSystemEventHandler.__init__(self)
self.filename = filename
def on_modified(self, event):
if type(event) == FileModifiedEvent:
if os.path.abspath(self.filename) == event.src_path:
self.do_gamification()
def do_gamification(self):
        # Count words; a context manager ensures the file handle is closed
        num_words = 0
        with open(self.filename) as f:
            for line in f:
                words = line.split(" ")
                for w in words:
                    if w.strip() != "" and w.isalnum():
                        num_words += 1
        logging.info("Total num of words: " + str(num_words))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if len(sys.argv) != 2:
print "Please supply a file to be watched"
sys.exit()
filename = sys.argv[1]
path = os.path.dirname(os.path.abspath(filename))
# Observer setup + start
event_handler = GamificationHandler(filename)
observer = Observer()
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
Add a first small file watcher that counts words
- Requires installing python watchdog frameworkimport os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileModifiedEvent
class GamificationHandler(FileSystemEventHandler):
def __init__(self, filename):
FileSystemEventHandler.__init__(self)
self.filename = filename
def on_modified(self, event):
if type(event) == FileModifiedEvent:
if os.path.abspath(self.filename) == event.src_path:
self.do_gamification()
def do_gamification(self):
        # Count words; a context manager ensures the file handle is closed
        num_words = 0
        with open(self.filename) as f:
            for line in f:
                words = line.split(" ")
                for w in words:
                    if w.strip() != "" and w.isalnum():
                        num_words += 1
        logging.info("Total num of words: " + str(num_words))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if len(sys.argv) != 2:
print "Please supply a file to be watched"
sys.exit()
filename = sys.argv[1]
path = os.path.dirname(os.path.abspath(filename))
# Observer setup + start
event_handler = GamificationHandler(filename)
observer = Observer()
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
<commit_before><commit_msg>Add a first small file watcher that counts words
- Requires installing python watchdog framework<commit_after>import os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from watchdog.events import FileModifiedEvent
class GamificationHandler(FileSystemEventHandler):
def __init__(self, filename):
FileSystemEventHandler.__init__(self)
self.filename = filename
def on_modified(self, event):
if type(event) == FileModifiedEvent:
if os.path.abspath(self.filename) == event.src_path:
self.do_gamification()
def do_gamification(self):
        # Count words; a context manager ensures the file handle is closed
        num_words = 0
        with open(self.filename) as f:
            for line in f:
                words = line.split(" ")
                for w in words:
                    if w.strip() != "" and w.isalnum():
                        num_words += 1
        logging.info("Total num of words: " + str(num_words))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
if len(sys.argv) != 2:
print "Please supply a file to be watched"
sys.exit()
filename = sys.argv[1]
path = os.path.dirname(os.path.abspath(filename))
# Observer setup + start
event_handler = GamificationHandler(filename)
observer = Observer()
observer.schedule(event_handler, path=path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
|
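Editor's note on the word count above: because w.isalnum() rejects tokens that carry punctuation, words such as "can't" or "done." are silently skipped. A minimal regex-based sketch (illustrative only, not part of the commit record) that also counts those tokens:

import re

def count_words(text):
    # \w+ matches maximal runs of word characters, so surrounding
    # punctuation is ignored rather than disqualifying the token
    return len(re.findall(r"\w+", text))

print(count_words("It can't be done, can it?"))  # 7 (the contraction splits in two)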
c4c52c98f4c8596b4f19c88fb64e1b0af4f9c4cd
|
tests/test_monitor_progress.py
|
tests/test_monitor_progress.py
|
pytest_plugins = "pytester"
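# pytest's "pytester" plugin provides the testdir fixture used below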
def test_simple_example(testdir):
""" Run the simple example code in a python subprocess and then compare its
stderr to what we expect to see from it. We run it in a subprocess to
best capture its stderr. We expect to see match_lines in order in the
output. This test is just a sanity check to ensure that the progress
bar progresses from 1 to 10, it does not make sure that the """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(10)):
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 10% \(1 of 10\)",
" 20% \(2 of 10\)",
" 30% \(3 of 10\)",
" 40% \(4 of 10\)",
" 50% \(5 of 10\)",
" 60% \(6 of 10\)",
" 70% \(7 of 10\)",
" 80% \(8 of 10\)",
" 90% \(9 of 10\)",
"100% \(10 of 10\)"
])
def test_rapid_updates(testdir):
""" Run some example code that updates 10 times, then sleeps .1 seconds,
this is meant to test that the progressbar progresses normally with
this sample code, since there were issues with it in the past """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(100)):
if i % 10 == 0:
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 1% \(1 of 100\)",
" 11% \(11 of 100\)",
" 21% \(21 of 100\)",
" 31% \(31 of 100\)",
" 41% \(41 of 100\)",
" 51% \(51 of 100\)",
" 61% \(61 of 100\)",
" 71% \(71 of 100\)",
" 81% \(81 of 100\)",
" 91% \(91 of 100\)",
"100% \(100 of 100\)"
])
|
Add New Test Which Monitors Output
|
Add New Test Which Monitors Output
This test monitors STDERR to detect progress of the progressbar
|
Python
|
bsd-3-clause
|
WoLpH/python-progressbar
|
Add New Test Which Monitors Output
This test monitors STDERR to detect progress of the progressbar
|
pytest_plugins = "pytester"
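# pytest's "pytester" plugin provides the testdir fixture used below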
def test_simple_example(testdir):
""" Run the simple example code in a python subprocess and then compare its
stderr to what we expect to see from it. We run it in a subprocess to
best capture its stderr. We expect to see match_lines in order in the
output. This test is just a sanity check to ensure that the progress
bar progresses from 1 to 10, it does not make sure that the """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(10)):
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 10% \(1 of 10\)",
" 20% \(2 of 10\)",
" 30% \(3 of 10\)",
" 40% \(4 of 10\)",
" 50% \(5 of 10\)",
" 60% \(6 of 10\)",
" 70% \(7 of 10\)",
" 80% \(8 of 10\)",
" 90% \(9 of 10\)",
"100% \(10 of 10\)"
])
def test_rapid_updates(testdir):
""" Run some example code that updates 10 times, then sleeps .1 seconds,
this is meant to test that the progressbar progresses normally with
this sample code, since there were issues with it in the past """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(100)):
if i % 10 == 0:
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 1% \(1 of 100\)",
" 11% \(11 of 100\)",
" 21% \(21 of 100\)",
" 31% \(31 of 100\)",
" 41% \(41 of 100\)",
" 51% \(51 of 100\)",
" 61% \(61 of 100\)",
" 71% \(71 of 100\)",
" 81% \(81 of 100\)",
" 91% \(91 of 100\)",
"100% \(100 of 100\)"
])
|
<commit_before><commit_msg>Add New Test Which Monitors Output
This test monitors STDERR to detect progress of the progressbar<commit_after>
|
pytest_plugins = "pytester"
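# pytest's "pytester" plugin provides the testdir fixture used below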
def test_simple_example(testdir):
""" Run the simple example code in a python subprocess and then compare its
stderr to what we expect to see from it. We run it in a subprocess to
best capture its stderr. We expect to see match_lines in order in the
output. This test is just a sanity check to ensure that the progress
bar progresses from 1 to 10, it does not make sure that the """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(10)):
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 10% \(1 of 10\)",
" 20% \(2 of 10\)",
" 30% \(3 of 10\)",
" 40% \(4 of 10\)",
" 50% \(5 of 10\)",
" 60% \(6 of 10\)",
" 70% \(7 of 10\)",
" 80% \(8 of 10\)",
" 90% \(9 of 10\)",
"100% \(10 of 10\)"
])
def test_rapid_updates(testdir):
""" Run some example code that updates 10 times, then sleeps .1 seconds,
this is meant to test that the progressbar progresses normally with
this sample code, since there were issues with it in the past """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(100)):
if i % 10 == 0:
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 1% \(1 of 100\)",
" 11% \(11 of 100\)",
" 21% \(21 of 100\)",
" 31% \(31 of 100\)",
" 41% \(41 of 100\)",
" 51% \(51 of 100\)",
" 61% \(61 of 100\)",
" 71% \(71 of 100\)",
" 81% \(81 of 100\)",
" 91% \(91 of 100\)",
"100% \(100 of 100\)"
])
|
Add New Test Which Monitors Output
This test monitors STDERR to detect progress of the progressbarpytest_plugins = "pytester"
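# pytest's "pytester" plugin provides the testdir fixture used below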
def test_simple_example(testdir):
""" Run the simple example code in a python subprocess and then compare its
stderr to what we expect to see from it. We run it in a subprocess to
best capture its stderr. We expect to see match_lines in order in the
output. This test is just a sanity check to ensure that the progress
bar progresses from 1 to 10, it does not make sure that the """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(10)):
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 10% \(1 of 10\)",
" 20% \(2 of 10\)",
" 30% \(3 of 10\)",
" 40% \(4 of 10\)",
" 50% \(5 of 10\)",
" 60% \(6 of 10\)",
" 70% \(7 of 10\)",
" 80% \(8 of 10\)",
" 90% \(9 of 10\)",
"100% \(10 of 10\)"
])
def test_rapid_updates(testdir):
""" Run some example code that updates 10 times, then sleeps .1 seconds,
this is meant to test that the progressbar progresses normally with
this sample code, since there were issues with it in the past """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(100)):
if i % 10 == 0:
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 1% \(1 of 100\)",
" 11% \(11 of 100\)",
" 21% \(21 of 100\)",
" 31% \(31 of 100\)",
" 41% \(41 of 100\)",
" 51% \(51 of 100\)",
" 61% \(61 of 100\)",
" 71% \(71 of 100\)",
" 81% \(81 of 100\)",
" 91% \(91 of 100\)",
"100% \(100 of 100\)"
])
|
<commit_before><commit_msg>Add New Test Which Monitors Output
This test monitors STDERR to detect progress of the progressbar<commit_after>pytest_plugins = "pytester"
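# pytest's "pytester" plugin provides the testdir fixture used below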
def test_simple_example(testdir):
""" Run the simple example code in a python subprocess and then compare its
stderr to what we expect to see from it. We run it in a subprocess to
best capture its stderr. We expect to see match_lines in order in the
output. This test is just a sanity check to ensure that the progress
bar progresses from 1 to 10, it does not make sure that the """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(10)):
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 10% \(1 of 10\)",
" 20% \(2 of 10\)",
" 30% \(3 of 10\)",
" 40% \(4 of 10\)",
" 50% \(5 of 10\)",
" 60% \(6 of 10\)",
" 70% \(7 of 10\)",
" 80% \(8 of 10\)",
" 90% \(9 of 10\)",
"100% \(10 of 10\)"
])
def test_rapid_updates(testdir):
""" Run some example code that updates 10 times, then sleeps .1 seconds,
this is meant to test that the progressbar progresses normally with
this sample code, since there were issues with it in the past """
v = testdir.makepyfile("""
import time
import progressbar
bar = progressbar.ProgressBar()
for i in bar(range(100)):
if i % 10 == 0:
time.sleep(0.1)
""")
result = testdir.runpython(v)
result.stderr.re_match_lines([
" 1% \(1 of 100\)",
" 11% \(11 of 100\)",
" 21% \(21 of 100\)",
" 31% \(31 of 100\)",
" 41% \(41 of 100\)",
" 51% \(51 of 100\)",
" 61% \(61 of 100\)",
" 71% \(71 of 100\)",
" 81% \(81 of 100\)",
" 91% \(91 of 100\)",
"100% \(100 of 100\)"
])
|
|
d680d6a20890d3bbce96792fa1e86df28956a859
|
helpers/threading.py
|
helpers/threading.py
|
from threading import Thread, Lock
list_lock = Lock()
def run_in_thread(app):
def wrapper(fn):
def run(*args, **kwargs):
app.logger.info('Starting thread: {}'.format(fn.__name__))
t = Thread(target=fn,
args=args,
kwargs=kwargs)
t.start()
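            # Hand the Thread object back so callers can join() it if needed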
return t
return run
return wrapper
|
Add a thread helper module
|
Add a thread helper module
|
Python
|
mit
|
finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is
|
Add a thread helper module
|
from threading import Thread, Lock
list_lock = Lock()
def run_in_thread(app):
def wrapper(fn):
def run(*args, **kwargs):
app.logger.info('Starting thread: {}'.format(fn.__name__))
t = Thread(target=fn,
args=args,
kwargs=kwargs)
t.start()
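            # Hand the Thread object back so callers can join() it if needed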
return t
return run
return wrapper
|
<commit_before><commit_msg>Add a thread helper module<commit_after>
|
from threading import Thread, Lock
list_lock = Lock()
def run_in_thread(app):
def wrapper(fn):
def run(*args, **kwargs):
app.logger.info('Starting thread: {}'.format(fn.__name__))
t = Thread(target=fn,
args=args,
kwargs=kwargs)
t.start()
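            # Hand the Thread object back so callers can join() it if needed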
return t
return run
return wrapper
|
Add a thread helper modulefrom threading import Thread, Lock
list_lock = Lock()
def run_in_thread(app):
def wrapper(fn):
def run(*args, **kwargs):
app.logger.info('Starting thread: {}'.format(fn.__name__))
t = Thread(target=fn,
args=args,
kwargs=kwargs)
t.start()
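            # Hand the Thread object back so callers can join() it if needed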
return t
return run
return wrapper
|
<commit_before><commit_msg>Add a thread helper module<commit_after>from threading import Thread, Lock
list_lock = Lock()
def run_in_thread(app):
def wrapper(fn):
def run(*args, **kwargs):
app.logger.info('Starting thread: {}'.format(fn.__name__))
t = Thread(target=fn,
args=args,
kwargs=kwargs)
t.start()
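            # Hand the Thread object back so callers can join() it if needed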
return t
return run
return wrapper
|
|
0cf2ce2331120c20de0cab384c5fdec763c25c68
|
min-char-rnn/markov-model.py
|
min-char-rnn/markov-model.py
|
# Simple Markov chain model for character-based text generation.
#
# Only tested with Python 3.6+
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
STATE_LEN = 4
def weighted_from_counter(c):
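    # Weighted random draw: pick a uniform index over the total count and
    # walk the elements in descending order, subtracting, until the index
    # lands inside one element's bucket.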
total = sum(c.values())
idx = random.randrange(total)
for elem, count in c.most_common():
idx -= count
if idx < 0:
return elem
def main():
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
states = defaultdict(Counter)
print('Learning model...')
for i in range(len(data) - STATE_LEN - 1):
state = data[i:i + STATE_LEN]
next = data[i + STATE_LEN]
states[state][next] += 1
print('Model has {0} states'.format(len(states)))
j = 0
for k, v in states.items():
print(k, v)
if j > 9:
break
j += 1
print('Sampling...')
state = random.choice(list(states))
sys.stdout.write(state)
for i in range(200):
nextc = weighted_from_counter(states[state])
sys.stdout.write(nextc)
state = state[1:] + nextc
print()
if __name__ == '__main__':
main()
|
Add a simple markov chain to compare output with RNN
|
Add a simple markov chain to compare output with RNN
|
Python
|
unlicense
|
eliben/deep-learning-samples,eliben/deep-learning-samples
|
Add a simple markov chain to compare output with RNN
|
# Simple Markov chain model for character-based text generation.
#
# Only tested with Python 3.6+
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
STATE_LEN = 4
def weighted_from_counter(c):
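    # Weighted random draw: pick a uniform index over the total count and
    # walk the elements in descending order, subtracting, until the index
    # lands inside one element's bucket.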
total = sum(c.values())
idx = random.randrange(total)
for elem, count in c.most_common():
idx -= count
if idx < 0:
return elem
def main():
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
states = defaultdict(Counter)
print('Learning model...')
for i in range(len(data) - STATE_LEN - 1):
state = data[i:i + STATE_LEN]
next = data[i + STATE_LEN]
states[state][next] += 1
print('Model has {0} states'.format(len(states)))
j = 0
for k, v in states.items():
print(k, v)
if j > 9:
break
j += 1
print('Sampling...')
state = random.choice(list(states))
sys.stdout.write(state)
for i in range(200):
nextc = weighted_from_counter(states[state])
sys.stdout.write(nextc)
state = state[1:] + nextc
print()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a simple markov chain to compare output with RNN<commit_after>
|
# Simple Markov chain model for character-based text generation.
#
# Only tested with Python 3.6+
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
STATE_LEN = 4
def weighted_from_counter(c):
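    # Weighted random draw: pick a uniform index over the total count and
    # walk the elements in descending order, subtracting, until the index
    # lands inside one element's bucket.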
total = sum(c.values())
idx = random.randrange(total)
for elem, count in c.most_common():
idx -= count
if idx < 0:
return elem
def main():
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
states = defaultdict(Counter)
print('Learning model...')
for i in range(len(data) - STATE_LEN - 1):
state = data[i:i + STATE_LEN]
next = data[i + STATE_LEN]
states[state][next] += 1
print('Model has {0} states'.format(len(states)))
j = 0
for k, v in states.items():
print(k, v)
if j > 9:
break
j += 1
print('Sampling...')
state = random.choice(list(states))
sys.stdout.write(state)
for i in range(200):
nextc = weighted_from_counter(states[state])
sys.stdout.write(nextc)
state = state[1:] + nextc
print()
if __name__ == '__main__':
main()
|
Add a simple markov chain to compare output with RNN# Simple Markov chain model for character-based text generation.
#
# Only tested with Python 3.6+
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
STATE_LEN = 4
def weighted_from_counter(c):
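    # Weighted random draw: pick a uniform index over the total count and
    # walk the elements in descending order, subtracting, until the index
    # lands inside one element's bucket.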
total = sum(c.values())
idx = random.randrange(total)
for elem, count in c.most_common():
idx -= count
if idx < 0:
return elem
def main():
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
states = defaultdict(Counter)
print('Learning model...')
for i in range(len(data) - STATE_LEN - 1):
state = data[i:i + STATE_LEN]
next = data[i + STATE_LEN]
states[state][next] += 1
print('Model has {0} states'.format(len(states)))
j = 0
for k, v in states.items():
print(k, v)
if j > 9:
break
j += 1
print('Sampling...')
state = random.choice(list(states))
sys.stdout.write(state)
for i in range(200):
nextc = weighted_from_counter(states[state])
sys.stdout.write(nextc)
state = state[1:] + nextc
print()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a simple markov chain to compare output with RNN<commit_after># Simple Markov chain model for character-based text generation.
#
# Only tested with Python 3.6+
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
from collections import defaultdict, Counter
import random
import sys
STATE_LEN = 4
def weighted_from_counter(c):
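    # Weighted random draw: pick a uniform index over the total count and
    # walk the elements in descending order, subtracting, until the index
    # lands inside one element's bucket.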
total = sum(c.values())
idx = random.randrange(total)
for elem, count in c.most_common():
idx -= count
if idx < 0:
return elem
def main():
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
states = defaultdict(Counter)
print('Learning model...')
for i in range(len(data) - STATE_LEN - 1):
state = data[i:i + STATE_LEN]
next = data[i + STATE_LEN]
states[state][next] += 1
print('Model has {0} states'.format(len(states)))
j = 0
for k, v in states.items():
print(k, v)
if j > 9:
break
j += 1
print('Sampling...')
state = random.choice(list(states))
sys.stdout.write(state)
for i in range(200):
nextc = weighted_from_counter(states[state])
sys.stdout.write(nextc)
state = state[1:] + nextc
print()
if __name__ == '__main__':
main()
|
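Editor's note on weighted_from_counter above: on Python 3.6+ the same count-weighted draw is available in the standard library, which makes a handy cross-check for the hand-rolled version. A minimal sketch with a hypothetical example Counter:

import random
from collections import Counter

c = Counter({'th': 30, 'he': 20, 'in': 10})
# random.choices draws each key with probability proportional to its
# count, matching the subtract-and-walk loop in weighted_from_counter
elem = random.choices(list(c.keys()), weights=list(c.values()), k=1)[0]
print(elem)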