| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22f278b8b457f90a674a50fba121611b87bfdf2e
|
tests/unit/utils/which_test.py
|
tests/unit/utils/which_test.py
|
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
Add unit test for salt.utils.which
|
Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.
|
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
<commit_before><commit_msg>Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.<commit_after>
|
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
<commit_before><commit_msg>Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.<commit_after># Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
|
e55115a7301c02a7b953ec0a250448259a11a5ee
|
zou/debug.py
|
zou/debug.py
|
from flask_socketio import SocketIO
from zou.app import app
socketio = SocketIO(app)
if __name__ == "__main__":
socketio.run(app, debug=True)
|
Add binary for development server
|
[dev] Add binary for development server
|
Python
|
agpl-3.0
|
cgwire/zou
|
[dev] Add binary for development server
|
from flask_socketio import SocketIO
from zou.app import app
socketio = SocketIO(app)
if __name__ == "__main__":
socketio.run(app, debug=True)
|
<commit_before><commit_msg>[dev] Add binary for development server<commit_after>
|
from flask_socketio import SocketIO
from zou.app import app
socketio = SocketIO(app)
if __name__ == "__main__":
socketio.run(app, debug=True)
|
[dev] Add binary for development serverfrom flask_socketio import SocketIO
from zou.app import app
socketio = SocketIO(app)
if __name__ == "__main__":
socketio.run(app, debug=True)
|
<commit_before><commit_msg>[dev] Add binary for development server<commit_after>from flask_socketio import SocketIO
from zou.app import app
socketio = SocketIO(app)
if __name__ == "__main__":
socketio.run(app, debug=True)
|
|
067287c34527ae8a05fa2c13473b147c32b417b0
|
ielex/lexicon/migrations/0140_auto_20161221_1117.py
|
ielex/lexicon/migrations/0140_auto_20161221_1117.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-21 11:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0139_auto_20161220_1441'),
]
operations = [
migrations.AlterField(
model_name='source',
name='ENTRYTYPE',
field=models.CharField(choices=[('book', 'book'),
('article', 'article'),
('expert', 'expert'),
('online', 'online'),
('inbook', 'inbook'),
('misc', 'misc')],
max_length=32),
),
]
|
Add missing migration for d4db9f29f
|
Add missing migration for d4db9f29f
- Commit d4db9f29f changed the attribute Source.ENTRYTYPE to be of a given choice,
but the migration for this change was missing and is added by this commit.
|
Python
|
bsd-2-clause
|
lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public
|
Add missing migration for d4db9f29f
- Commit d4db9f29f changed the attribute Source.ENTRYTYPE to be of a given choice,
but the migration for this change was missing and is added by this commit.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-21 11:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0139_auto_20161220_1441'),
]
operations = [
migrations.AlterField(
model_name='source',
name='ENTRYTYPE',
field=models.CharField(choices=[('book', 'book'),
('article', 'article'),
('expert', 'expert'),
('online', 'online'),
('inbook', 'inbook'),
('misc', 'misc')],
max_length=32),
),
]
|
<commit_before><commit_msg>Add missing migration for d4db9f29f
- Commit d4db9f29f changed the attribute Source.ENTRYTYPE to be of a given choice,
but the migration for this change was missing and is added by this commit.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-21 11:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0139_auto_20161220_1441'),
]
operations = [
migrations.AlterField(
model_name='source',
name='ENTRYTYPE',
field=models.CharField(choices=[('book', 'book'),
('article', 'article'),
('expert', 'expert'),
('online', 'online'),
('inbook', 'inbook'),
('misc', 'misc')],
max_length=32),
),
]
|
Add missing migration for d4db9f29f
- Commit d4db9f29f changed the attribute Source.ENTRYTYPE to be of a given choice,
but the migration for this change was missing and is added by this commit.# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-21 11:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0139_auto_20161220_1441'),
]
operations = [
migrations.AlterField(
model_name='source',
name='ENTRYTYPE',
field=models.CharField(choices=[('book', 'book'),
('article', 'article'),
('expert', 'expert'),
('online', 'online'),
('inbook', 'inbook'),
('misc', 'misc')],
max_length=32),
),
]
|
<commit_before><commit_msg>Add missing migration for d4db9f29f
- Commit d4db9f29f changed the attribute Source.ENTRYTYPE to be of a given choice,
but the migration for this change was missing and is added by this commit.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-21 11:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0139_auto_20161220_1441'),
]
operations = [
migrations.AlterField(
model_name='source',
name='ENTRYTYPE',
field=models.CharField(choices=[('book', 'book'),
('article', 'article'),
('expert', 'expert'),
('online', 'online'),
('inbook', 'inbook'),
('misc', 'misc')],
max_length=32),
),
]
|
|
b9b93ff8f95f249836c1c872c126589a245798f0
|
recipes/py2dm/run_test.py
|
recipes/py2dm/run_test.py
|
#!/usr/bin/env python3
import unittest
class Py2DMTestCase(unittest.TestCase):
# The module will throw a warning if the C implementation cannot be
# loaded. Make sure this warning is NOT thrown
@unittest.expectedFailure
def test_cimport(self):
with self.assertWarns(UserWarning):
import py2dm
if __name__ == '__main__':
unittest.main()
|
Test successful load of C extension
|
Test successful load of C extension
|
Python
|
bsd-3-clause
|
goanpeca/staged-recipes,conda-forge/staged-recipes,jakirkham/staged-recipes,goanpeca/staged-recipes,johanneskoester/staged-recipes,conda-forge/staged-recipes,ocefpaf/staged-recipes,ocefpaf/staged-recipes,johanneskoester/staged-recipes,jakirkham/staged-recipes
|
Test successful load of C extension
|
#!/usr/bin/env python3
import unittest
class Py2DMTestCase(unittest.TestCase):
# The module will throw a warning if the C implementation cannot be
# loaded. Make sure this warning is NOT thrown
@unittest.expectedFailure
def test_cimport(self):
with self.assertWarns(UserWarning):
import py2dm
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test successful load of C extension<commit_after>
|
#!/usr/bin/env python3
import unittest
class Py2DMTestCase(unittest.TestCase):
# The module will throw a warning if the C implementation cannot be
# loaded. Make sure this warning is NOT thrown
@unittest.expectedFailure
def test_cimport(self):
with self.assertWarns(UserWarning):
import py2dm
if __name__ == '__main__':
unittest.main()
|
Test successful load of C extension#!/usr/bin/env python3
import unittest
class Py2DMTestCase(unittest.TestCase):
# The module will throw a warning if the C implementation cannot be
# loaded. Make sure this warning is NOT thrown
@unittest.expectedFailure
def test_cimport(self):
with self.assertWarns(UserWarning):
import py2dm
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test successful load of C extension<commit_after>#!/usr/bin/env python3
import unittest
class Py2DMTestCase(unittest.TestCase):
# The module will throw a warning if the C implementation cannot be
# loaded. Make sure this warning is NOT thrown
@unittest.expectedFailure
def test_cimport(self):
with self.assertWarns(UserWarning):
import py2dm
if __name__ == '__main__':
unittest.main()
|
|
6e36732150295869a8ddaebbaf6ff07b76eb06b8
|
uwsgi-nginx/app/main.py
|
uwsgi-nginx/app/main.py
|
# test.py
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
#return [b"Hello World"] # python3
return ["Hello World"] # python2
|
Add default Python example app
|
Add default Python example app
|
Python
|
apache-2.0
|
tiangolo/uwsgi-nginx-docker,tiangolo/uwsgi-nginx-docker
|
Add default Python example app
|
# test.py
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
#return [b"Hello World"] # python3
return ["Hello World"] # python2
|
<commit_before><commit_msg>Add default Python example app<commit_after>
|
# test.py
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
#return [b"Hello World"] # python3
return ["Hello World"] # python2
|
Add default Python example app# test.py
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
#return [b"Hello World"] # python3
return ["Hello World"] # python2
|
<commit_before><commit_msg>Add default Python example app<commit_after># test.py
def application(env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
#return [b"Hello World"] # python3
return ["Hello World"] # python2
|
|
014caf2a13f93df60454b05bcc7c8381a424b24b
|
nap/actions.py
|
nap/actions.py
|
from django.utils.encoding import force_text
class CSV(object):
'''
A generator friendly, unicode aware CSV encoder class built for speed.
'''
# What to put between fields
SEP = u','
# What to wrap fields in, if they contain SEP
QUOTE = u'"'
# What to replace a QUOTE in a field with
ESCQUOTE = QUOTE + QUOTE
# What to put between records
LINEBREAK = u'\n'
ENCODING = 'utf-8'
fields = []
def __init__(self, **opts):
'''
opts MUST contain 'fields', a list of field names.
opts may also include 'headers', a list of field headings.
opts MAY override any of the above configurables.
'''
self.__dict__.update(opts)
def write(self, values):
'''Write a row of values'''
def escape_field(val, SEP=self.SEP, QUOTE=self.QUOTE, ESCQ=self.ESCQUOTE):
'''
Escape separator and quote values, and wrap with quotes if needed
Inlined for speed
'''
# escape quotes in the value
val = val.replace(QUOTE, ESCQ)
# if needed, wrap quotes around value
if SEP in val or QUOTE in val:
return QUOTE + val + QUOTE
return val
line = self.SEP.join(map(escape_field, values)) + self.LINEBREAK
if self.ENCODING:
line = line.encode(self.ENCODING)
return line
def write_dict(self, values):
'''Write a row, getting values from a dict.'''
return self.write(map(values.get, self.fields))
def write_headers(self):
'''Write a row of headers.'''
return self.write(self.headers or self.fields)
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
csv = CSV(fields=serialiser._fields.keys())
def inner(ser):
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = admin.csv_
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
Add simple CSV encoder and Export action
|
Add simple CSV encoder and Export action
|
Python
|
bsd-3-clause
|
limbera/django-nap,MarkusH/django-nap
|
Add simple CSV encoder and Export action
|
from django.utils.encoding import force_text
class CSV(object):
'''
A generator friendly, unicode aware CSV encoder class built for speed.
'''
# What to put between fields
SEP = u','
# What to wrap fields in, if they contain SEP
QUOTE = u'"'
# What to replace a QUOTE in a field with
ESCQUOTE = QUOTE + QUOTE
# What to put between records
LINEBREAK = u'\n'
ENCODING = 'utf-8'
fields = []
def __init__(self, **opts):
'''
opts MUST contain 'fields', a list of field names.
opts may also include 'headers', a list of field headings.
opts MAY override any of the above configurables.
'''
self.__dict__.update(opts)
def write(self, values):
'''Write a row of values'''
def escape_field(val, SEP=self.SEP, QUOTE=self.QUOTE, ESCQ=self.ESCQUOTE):
'''
Escape separator and quote values, and wrap with quotes if needed
Inlined for speed
'''
# escape quotes in the value
val = val.replace(QUOTE, ESCQ)
# if needed, wrap quotes around value
if SEP in val or QUOTE in val:
return QUOTE + val + QUOTE
return val
line = self.SEP.join(map(escape_field, values)) + self.LINEBREAK
if self.ENCODING:
line = line.encode(self.ENCODING)
return line
def write_dict(self, values):
'''Write a row, getting values from a dict.'''
return self.write(map(values.get, self.fields))
def write_headers(self):
'''Write a row of headers.'''
return self.write(self.headers or self.fields)
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
csv = CSV(fields=serialiser._fields.keys())
def inner(ser):
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = admin.csv_
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
<commit_before><commit_msg>Add simple CSV encoder and Export action<commit_after>
|
from django.utils.encoding import force_text
class CSV(object):
'''
A generator friendly, unicode aware CSV encoder class built for speed.
'''
# What to put between fields
SEP = u','
# What to wrap fields in, if they contain SEP
QUOTE = u'"'
# What to replace a QUOTE in a field with
ESCQUOTE = QUOTE + QUOTE
# What to put between records
LINEBREAK = u'\n'
ENCODING = 'utf-8'
fields = []
def __init__(self, **opts):
'''
opts MUST contain 'fields', a list of field names.
opts may also include 'headers', a list of field headings.
opts MAY override any of the above configurables.
'''
self.__dict__.update(opts)
def write(self, values):
'''Write a row of values'''
def escape_field(val, SEP=self.SEP, QUOTE=self.QUOTE, ESCQ=self.ESCQUOTE):
'''
Escape separator and quote values, and wrap with quotes if needed
Inlined for speed
'''
# escape quotes in the value
val = val.replace(QUOTE, ESCQ)
# if needed, wrap quotes around value
if SEP in val or QUOTE in val:
return QUOTE + val + QUOTE
return val
line = self.SEP.join(map(escape_field, values)) + self.LINEBREAK
if self.ENCODING:
line = line.encode(self.ENCODING)
return line
def write_dict(self, values):
'''Write a row, getting values from a dict.'''
return self.write(map(values.get, self.fields))
def write_headers(self):
'''Write a row of headers.'''
return self.write(self.headers or self.fields)
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
csv = CSV(fields=serialiser._fields.keys())
def inner(ser):
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = admin.csv_
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
Add simple CSV encoder and Export action
from django.utils.encoding import force_text
class CSV(object):
'''
A generator friendly, unicode aware CSV encoder class built for speed.
'''
# What to put between fields
SEP = u','
# What to wrap fields in, if they contain SEP
QUOTE = u'"'
# What to replace a QUOTE in a field with
ESCQUOTE = QUOTE + QUOTE
# What to put between records
LINEBREAK = u'\n'
ENCODING = 'utf-8'
fields = []
def __init__(self, **opts):
'''
opts MUST contain 'fields', a list of field names.
opts may also include 'headers', a list of field headings.
opts MAY override any of the above configurables.
'''
self.__dict__.update(opts)
def write(self, values):
'''Write a row of values'''
def escape_field(val, SEP=self.SEP, QUOTE=self.QUOTE, ESCQ=self.ESCQUOTE):
'''
Escape separator and quote values, and wrap with quotes if needed
Inlined for speed
'''
# escape quotes in the value
val = val.replace(QUOTE, ESCQ)
# if needed, wrap quotes around value
if SEP in val or QUOTE in val:
return QUOTE + val + QUOTE
return val
line = self.SEP.join(map(escape_field, values)) + self.LINEBREAK
if self.ENCODING:
line = line.encode(self.ENCODING)
return line
def write_dict(self, values):
'''Write a row, getting values from a dict.'''
return self.write(map(values.get, self.fields))
def write_headers(self):
'''Write a row of headers.'''
return self.write(self.headers or self.fields)
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
csv = CSV(fields=serialiser._fields.keys())
def inner(ser):
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = admin.csv_
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
<commit_before><commit_msg>Add simple CSV encoder and Export action<commit_after>
from django.utils.encoding import force_text
class CSV(object):
'''
A generator friendly, unicode aware CSV encoder class built for speed.
'''
# What to put between fields
SEP = u','
# What to wrap fields in, if they contain SEP
QUOTE = u'"'
# What to replace a QUOTE in a field with
ESCQUOTE = QUOTE + QUOTE
# What to put between records
LINEBREAK = u'\n'
ENCODING = 'utf-8'
fields = []
def __init__(self, **opts):
'''
opts MUST contain 'fields', a list of field names.
opts may also include 'headers', a list of field headings.
opts MAY override any of the above configurables.
'''
self.__dict__.update(opts)
def write(self, values):
'''Write a row of values'''
def escape_field(val, SEP=self.SEP, QUOTE=self.QUOTE, ESCQ=self.ESCQUOTE):
'''
Escape separator and quote values, and wrap with quotes if needed
Inlined for speed
'''
# escape quotes in the value
val = val.replace(QUOTE, ESCQ)
# if needed, wrap quotes around value
if SEP in val or QUOTE in val:
return QUOTE + val + QUOTE
return val
line = self.SEP.join(map(escape_field, values)) + self.LINEBREAK
if self.ENCODING:
line = line.encode(self.ENCODING)
return line
def write_dict(self, values):
'''Write a row, getting values from a dict.'''
return self.write(map(values.get, self.fields))
def write_headers(self):
'''Write a row of headers.'''
return self.write(self.headers or self.fields)
class ExportCsv(object):
def __init__(self, serialiser=None, label=None, **opts):
self.serialiser = serialiser
self.opts = opts
if label:
self.short_description = label
def __call__(self, admin, request, queryset):
if self.serialiser is None:
ser_class = modelserialiser_factory(
'%sSerialiser' % admin.__class__.__name__,
admin.model,
**self.opts
)
else:
ser_class = self.serialiser
csv = CSV(fields=serialiser._fields.keys())
def inner(ser):
yield csv.write_headers()
for obj in queryset:
data = {
key: force_text(val)
for key, val in ser.object_deflate(obj).items()
}
yield csv.write_dict(data)
response = StreamingHttpResponse(inner(ser_class()), content_type='text/csv')
filename = admin.csv_
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
|
bbb3638374d748a91f9a1cdc5450b6b2a8474597
|
corehq/apps/commtrack/management/commands/fix_ipm.py
|
corehq/apps/commtrack/management/commands/fix_ipm.py
|
from django.core.management.base import BaseCommand
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
from corehq.apps.commtrack.models import SupplyPointCase
class Command(BaseCommand):
startkey = ['ipm-senegal', 'by_type', 'XFormInstance']
endkey = startkey + [{}]
ids = [row['id'] for row in XFormInstance.get_db().view(
"couchforms/all_submissions_by_domain",
startkey=startkey,
endkey=endkey,
reduce=False
)]
to_save = []
for doc in iter_docs(XFormInstance.get_db(), ids):
if 'location_id' in doc['form'] and not doc['form']['location_id']:
case = SupplyPointCase.get(doc['form']['case']['@case_id'])
if case.type == 'supply-point':
print 'updating'
print 'case location_id:', case.location_id
print 'from:', doc['form']['location_id']
doc['form']['location_id'] = case.location_id
print 'to:', doc['form']['location_id']
to_save.append(doc)
if len(to_save) > 500:
XFormInstance.get_db().bulk_save(to_save)
to_save = []
if to_save:
XFormInstance.get_db().bulk_save(to_save)
|
Add command to cleanup commtrack forms
|
Add command to cleanup commtrack forms
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
Add command to cleanup commtrack forms
|
from django.core.management.base import BaseCommand
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
from corehq.apps.commtrack.models import SupplyPointCase
class Command(BaseCommand):
startkey = ['ipm-senegal', 'by_type', 'XFormInstance']
endkey = startkey + [{}]
ids = [row['id'] for row in XFormInstance.get_db().view(
"couchforms/all_submissions_by_domain",
startkey=startkey,
endkey=endkey,
reduce=False
)]
to_save = []
for doc in iter_docs(XFormInstance.get_db(), ids):
if 'location_id' in doc['form'] and not doc['form']['location_id']:
case = SupplyPointCase.get(doc['form']['case']['@case_id'])
if case.type == 'supply-point':
print 'updating'
print 'case location_id:', case.location_id
print 'from:', doc['form']['location_id']
doc['form']['location_id'] = case.location_id
print 'to:', doc['form']['location_id']
to_save.append(doc)
if len(to_save) > 500:
XFormInstance.get_db().bulk_save(to_save)
to_save = []
if to_save:
XFormInstance.get_db().bulk_save(to_save)
|
<commit_before><commit_msg>Add command to cleanup commtrack forms<commit_after>
|
from django.core.management.base import BaseCommand
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
from corehq.apps.commtrack.models import SupplyPointCase
class Command(BaseCommand):
startkey = ['ipm-senegal', 'by_type', 'XFormInstance']
endkey = startkey + [{}]
ids = [row['id'] for row in XFormInstance.get_db().view(
"couchforms/all_submissions_by_domain",
startkey=startkey,
endkey=endkey,
reduce=False
)]
to_save = []
for doc in iter_docs(XFormInstance.get_db(), ids):
if 'location_id' in doc['form'] and not doc['form']['location_id']:
case = SupplyPointCase.get(doc['form']['case']['@case_id'])
if case.type == 'supply-point':
print 'updating'
print 'case location_id:', case.location_id
print 'from:', doc['form']['location_id']
doc['form']['location_id'] = case.location_id
print 'to:', doc['form']['location_id']
to_save.append(doc)
if len(to_save) > 500:
XFormInstance.get_db().bulk_save(to_save)
to_save = []
if to_save:
XFormInstance.get_db().bulk_save(to_save)
|
Add command to cleanup commtrack formsfrom django.core.management.base import BaseCommand
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
from corehq.apps.commtrack.models import SupplyPointCase
class Command(BaseCommand):
startkey = ['ipm-senegal', 'by_type', 'XFormInstance']
endkey = startkey + [{}]
ids = [row['id'] for row in XFormInstance.get_db().view(
"couchforms/all_submissions_by_domain",
startkey=startkey,
endkey=endkey,
reduce=False
)]
to_save = []
for doc in iter_docs(XFormInstance.get_db(), ids):
if 'location_id' in doc['form'] and not doc['form']['location_id']:
case = SupplyPointCase.get(doc['form']['case']['@case_id'])
if case.type == 'supply-point':
print 'updating'
print 'case location_id:', case.location_id
print 'from:', doc['form']['location_id']
doc['form']['location_id'] = case.location_id
print 'to:', doc['form']['location_id']
to_save.append(doc)
if len(to_save) > 500:
XFormInstance.get_db().bulk_save(to_save)
to_save = []
if to_save:
XFormInstance.get_db().bulk_save(to_save)
|
<commit_before><commit_msg>Add command to cleanup commtrack forms<commit_after>from django.core.management.base import BaseCommand
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import iter_docs
from corehq.apps.commtrack.models import SupplyPointCase
class Command(BaseCommand):
startkey = ['ipm-senegal', 'by_type', 'XFormInstance']
endkey = startkey + [{}]
ids = [row['id'] for row in XFormInstance.get_db().view(
"couchforms/all_submissions_by_domain",
startkey=startkey,
endkey=endkey,
reduce=False
)]
to_save = []
for doc in iter_docs(XFormInstance.get_db(), ids):
if 'location_id' in doc['form'] and not doc['form']['location_id']:
case = SupplyPointCase.get(doc['form']['case']['@case_id'])
if case.type == 'supply-point':
print 'updating'
print 'case location_id:', case.location_id
print 'from:', doc['form']['location_id']
doc['form']['location_id'] = case.location_id
print 'to:', doc['form']['location_id']
to_save.append(doc)
if len(to_save) > 500:
XFormInstance.get_db().bulk_save(to_save)
to_save = []
if to_save:
XFormInstance.get_db().bulk_save(to_save)
|
|
f25a7b260ea888b1470e3203b0ce3d94bfdf477d
|
data.py
|
data.py
|
import numpy as np
def _consecutive_index_generator(length, offset=0):
"""Generate pair of ids of consecutive images. Offset is the distance between the images
"""
offset += 1
for i in range(length - offset):
yield (i, i + offset)
def generate_learning_set(array, random_permutation=True, offset=0):
"""Generate learning set of consecutive scans
Parameters
----------
array: numpy array of shape n_scans x n_voxels
Array of masked scans
random_permutation: boolean
If True, consecutive scans are switched with a probability of .5
offset: int
Distance between two consecutive scans
Returns
-------
learning_set: list of (img_a, img_b, label)
Two consecutive images, label is 1 if images are ordered, 0 otherwise
"""
np.random.seed()
learning_set = []
for (ia, ib) in _consecutive_index_generator(array.shape[0], offset=offset):
label = np.random.randint(0, 2)
if label == 0:
ia, ib = ib, ia
learning_set.append((ia, ib, label))
return learning_set
if __name__ == '__main__':
array = np.arange(5)
res = generate_learning_set(array)
assert(len(res) == 4)
for ia, ib, label in res:
if label == 0:
ia, ib = ib, ia
res = generate_learning_set(array, random_permutation=False, offset=1)
assert(len(res) == 3)
for ia, ib, label in res:
assert(ia < ib)
assert(label == 1)
print('Basic testing is OK')
|
Add small script to generate learning set.
|
Add small script to generate learning set.
|
Python
|
mit
|
ogrisel/brain2vec
|
Add small script to generate learning set.
|
import numpy as np
def _consecutive_index_generator(length, offset=0):
"""Generate pair of ids of consecutive images. Offset is the distance between the images
"""
offset += 1
for i in range(length - offset):
yield (i, i + offset)
def generate_learning_set(array, random_permutation=True, offset=0):
"""Generate learning set of consecutive scans
Parameters
----------
array: numpy array of shape n_scans x n_voxels
Array of masked scans
random_permutation: boolean
If True, consecutive scans are switched with a probability of .5
offset: int
Distance between two consecutive scans
Returns
-------
learning_set: list of (img_a, img_b, label)
Two consecutive images, label is 1 if images are ordered, 0 otherwise
"""
np.random.seed()
learning_set = []
for (ia, ib) in _consecutive_index_generator(array.shape[0], offset=offset):
label = np.random.randint(0, 2)
if label == 0:
ia, ib = ib, ia
learning_set.append((ia, ib, label))
return learning_set
if __name__ == '__main__':
array = np.arange(5)
res = generate_learning_set(array)
assert(len(res) == 4)
for ia, ib, label in res:
if label == 0:
ia, ib = ib, ia
res = generate_learning_set(array, random_permutation=False, offset=1)
assert(len(res) == 3)
for ia, ib, label in res:
assert(ia < ib)
assert(label == 1)
print('Basic testing is OK')
|
<commit_before><commit_msg>Add small script to generate learning set.<commit_after>
|
import numpy as np
def _consecutive_index_generator(length, offset=0):
"""Generate pair of ids of consecutive images. Offset is the distance between the images
"""
offset += 1
for i in range(length - offset):
yield (i, i + offset)
def generate_learning_set(array, random_permutation=True, offset=0):
"""Generate learning set of consecutive scans
Parameters
----------
array: numpy array of shape n_scans x n_voxels
Array of masked scans
random_permutation: boolean
If True, consecutive scans are switched with a probability of .5
offset: int
Distance between two consecutive scans
Returns
-------
learning_set: list of (img_a, img_b, label)
Two consecutive images, label is 1 if images are ordered, 0 otherwise
"""
np.random.seed()
learning_set = []
for (ia, ib) in _consecutive_index_generator(array.shape[0], offset=offset):
label = np.random.randint(0, 2)
if label == 0:
ia, ib = ib, ia
learning_set.append((ia, ib, label))
return learning_set
if __name__ == '__main__':
array = np.arange(5)
res = generate_learning_set(array)
assert(len(res) == 4)
for ia, ib, label in res:
if label == 0:
ia, ib = ib, ia
res = generate_learning_set(array, random_permutation=False, offset=1)
assert(len(res) == 3)
for ia, ib, label in res:
assert(ia < ib)
assert(label == 1)
print('Basic testing is OK')
|
Add small script to generate learning set.import numpy as np
def _consecutive_index_generator(length, offset=0):
"""Generate pair of ids of consecutive images. Offset is the distance between the images
"""
offset += 1
for i in range(length - offset):
yield (i, i + offset)
def generate_learning_set(array, random_permutation=True, offset=0):
"""Generate learning set of consecutive scans
Parameters
----------
array: numpy array of shape n_scans x n_voxels
Array of masked scans
random_permutation: boolean
If True, consecutive scans are switched with a probability of .5
offset: int
Distance between two consecutive scans
Returns
-------
learning_set: list of (img_a, img_b, label)
Two consecutive images, label is 1 if images are ordered, 0 otherwise
"""
np.random.seed()
learning_set = []
for (ia, ib) in _consecutive_index_generator(array.shape[0], offset=offset):
label = np.random.randint(0, 2)
if label == 0:
ia, ib = ib, ia
learning_set.append((ia, ib, label))
return learning_set
if __name__ == '__main__':
array = np.arange(5)
res = generate_learning_set(array)
assert(len(res) == 4)
for ia, ib, label in res:
if label == 0:
ia, ib = ib, ia
res = generate_learning_set(array, random_permutation=False, offset=1)
assert(len(res) == 3)
for ia, ib, label in res:
assert(ia < ib)
assert(label == 1)
print('Basic testing is OK')
|
<commit_before><commit_msg>Add small script to generate learning set.<commit_after>import numpy as np
def _consecutive_index_generator(length, offset=0):
"""Generate pair of ids of consecutive images. Offset is the distance between the images
"""
offset += 1
for i in range(length - offset):
yield (i, i + offset)
def generate_learning_set(array, random_permutation=True, offset=0):
"""Generate learning set of consecutive scans
Parameters
----------
array: numpy array of shape n_scans x n_voxels
Array of masked scans
random_permutation: boolean
If True, consecutive scans are switched with a probability of .5
offset: int
Distance between two consecutive scans
Returns
-------
learning_set: list of (img_a, img_b, label)
Two consecutive images, label is 1 if images are ordered, 0 otherwise
"""
np.random.seed()
learning_set = []
for (ia, ib) in _consecutive_index_generator(array.shape[0], offset=offset):
label = np.random.randint(0, 2)
if label == 0:
ia, ib = ib, ia
learning_set.append((ia, ib, label))
return learning_set
if __name__ == '__main__':
array = np.arange(5)
res = generate_learning_set(array)
assert(len(res) == 4)
for ia, ib, label in res:
if label == 0:
ia, ib = ib, ia
res = generate_learning_set(array, random_permutation=False, offset=1)
assert(len(res) == 3)
for ia, ib, label in res:
assert(ia < ib)
assert(label == 1)
print('Basic testing is OK')
|
|
00a4bde767b4392e19ef366c7d9f916415af2762
|
django_mailbox/management/commands/processincomingmessage.py
|
django_mailbox/management/commands/processincomingmessage.py
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['to'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
Use TO header for creating mailbox name.
|
Use TO header for creating mailbox name.
--HG--
branch : exim4_pipe
|
Python
|
mit
|
leifurhauks/django-mailbox,Shekharrajak/django-mailbox,coddingtonbear/django-mailbox,ad-m/django-mailbox
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
Use TO header for creating mailbox name.
--HG--
branch : exim4_pipe
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['to'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
<commit_before>import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
<commit_msg>Use TO header for creating mailbox name.
--HG--
branch : exim4_pipe<commit_after>
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['to'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
Use TO header for creating mailbox name.
--HG--
branch : exim4_pipeimport email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['to'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
<commit_before>import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
<commit_msg>Use TO header for creating mailbox name.
--HG--
branch : exim4_pipe<commit_after>import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['to'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
|
7068258e692872b2fb7bbdade55dadaecff2ffb5
|
python/day11.py
|
python/day11.py
|
#!/usr/local/bin/python3
puzzle_input = 'vzbxkghb'
def validate_password(password):
pass
def test_validate_password():
assert not validate_password('hijklmmn')
assert not validate_password('abbceffg')
assert not validate_password('abbcegjk')
|
Add Day 11 python file
|
Add Day 11 python file
|
Python
|
mit
|
robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions
|
Add Day 11 python file
|
#!/usr/local/bin/python3
puzzle_input = 'vzbxkghb'
def validate_password(password):
pass
def test_validate_password():
assert not validate_password('hijklmmn')
assert not validate_password('abbceffg')
assert not validate_password('abbcegjk')
|
<commit_before><commit_msg>Add Day 11 python file<commit_after>
|
#!/usr/local/bin/python3
puzzle_input = 'vzbxkghb'
def validate_password(password):
pass
def test_validate_password():
assert not validate_password('hijklmmn')
assert not validate_password('abbceffg')
assert not validate_password('abbcegjk')
|
Add Day 11 python file#!/usr/local/bin/python3
puzzle_input = 'vzbxkghb'
def validate_password(password):
pass
def test_validate_password():
assert not validate_password('hijklmmn')
assert not validate_password('abbceffg')
assert not validate_password('abbcegjk')
|
<commit_before><commit_msg>Add Day 11 python file<commit_after>#!/usr/local/bin/python3
puzzle_input = 'vzbxkghb'
def validate_password(password):
pass
def test_validate_password():
assert not validate_password('hijklmmn')
assert not validate_password('abbceffg')
assert not validate_password('abbcegjk')
|
|
b32b84b2c696a4ae5ff57035dee5bc2033affe34
|
tests/test_main.py
|
tests/test_main.py
|
"""tests/test_main.py.
Basic testing of hug's `__main__` module
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
def test_main(capsys):
"""Main module should be importable, but should raise a SystemExit after CLI docs print"""
with pytest.raises(SystemExit):
from hug import __main__
|
Add minimial testing of __main__ module
|
Add minimial testing of __main__ module
|
Python
|
mit
|
timothycrosley/hug,timothycrosley/hug,timothycrosley/hug
|
Add minimial testing of __main__ module
|
"""tests/test_main.py.
Basic testing of hug's `__main__` module
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
def test_main(capsys):
"""Main module should be importable, but should raise a SystemExit after CLI docs print"""
with pytest.raises(SystemExit):
from hug import __main__
|
<commit_before><commit_msg>Add minimial testing of __main__ module<commit_after>
|
"""tests/test_main.py.
Basic testing of hug's `__main__` module
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
def test_main(capsys):
"""Main module should be importable, but should raise a SystemExit after CLI docs print"""
with pytest.raises(SystemExit):
from hug import __main__
|
Add minimial testing of __main__ module"""tests/test_main.py.
Basic testing of hug's `__main__` module
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
def test_main(capsys):
"""Main module should be importable, but should raise a SystemExit after CLI docs print"""
with pytest.raises(SystemExit):
from hug import __main__
|
<commit_before><commit_msg>Add minimial testing of __main__ module<commit_after>"""tests/test_main.py.
Basic testing of hug's `__main__` module
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
def test_main(capsys):
"""Main module should be importable, but should raise a SystemExit after CLI docs print"""
with pytest.raises(SystemExit):
from hug import __main__
|
|
cce2869ac56fe3576e519884fd2a68d75a7fe1cb
|
backend/scripts/countdups.py
|
backend/scripts/countdups.py
|
#!/usr/bin/env python
import rethinkdb as r
conn = r.connect('localhost', 30815, db='materialscommons')
rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size')
total_bytes = 0
total_files = 0
for doc in rql.run(conn):
total_bytes = total_bytes + doc['size']
total_files = total_files + 1
print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
|
Add in script to count duplicates.
|
Add in script to count duplicates.
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add in script to count duplicates.
|
#!/usr/bin/env python
import rethinkdb as r
conn = r.connect('localhost', 30815, db='materialscommons')
rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size')
total_bytes = 0
total_files = 0
for doc in rql.run(conn):
total_bytes = total_bytes + doc['size']
total_files = total_files + 1
print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
|
<commit_before><commit_msg>Add in script to count duplicates.<commit_after>
|
#!/usr/bin/env python
import rethinkdb as r
conn = r.connect('localhost', 30815, db='materialscommons')
rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size')
total_bytes = 0
total_files = 0
for doc in rql.run(conn):
total_bytes = total_bytes + doc['size']
total_files = total_files + 1
print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
|
Add in script to count duplicates.#!/usr/bin/env python
import rethinkdb as r
conn = r.connect('localhost', 30815, db='materialscommons')
rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size')
total_bytes = 0
total_files = 0
for doc in rql.run(conn):
total_bytes = total_bytes + doc['size']
total_files = total_files + 1
print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
|
<commit_before><commit_msg>Add in script to count duplicates.<commit_after>#!/usr/bin/env python
import rethinkdb as r
conn = r.connect('localhost', 30815, db='materialscommons')
rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size')
total_bytes = 0
total_files = 0
for doc in rql.run(conn):
total_bytes = total_bytes + doc['size']
total_files = total_files + 1
print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
|
|
063375e9dbb5afad92d21fc6fc22242e373dfdd0
|
py/non-decreasing-array.py
|
py/non-decreasing-array.py
|
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums:
return True
l = len(nums)
fixed = False
for i in xrange(l):
if i + 1 < l:
if nums[i] > nums[i + 1]:
if fixed:
return False
if i + 2 >= l or i == 0:
pass
elif nums[i + 1] >= nums[i - 1]:
pass
else:
nums[i + 1] = nums[i]
fixed = True
return True
|
Add py solution for 665. Non-decreasing Array
|
Add py solution for 665. Non-decreasing Array
665. Non-decreasing Array: https://leetcode.com/problems/non-decreasing-array/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 665. Non-decreasing Array
665. Non-decreasing Array: https://leetcode.com/problems/non-decreasing-array/
|
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums:
return True
l = len(nums)
fixed = False
for i in xrange(l):
if i + 1 < l:
if nums[i] > nums[i + 1]:
if fixed:
return False
if i + 2 >= l or i == 0:
pass
elif nums[i + 1] >= nums[i - 1]:
pass
else:
nums[i + 1] = nums[i]
fixed = True
return True
|
<commit_before><commit_msg>Add py solution for 665. Non-decreasing Array
665. Non-decreasing Array: https://leetcode.com/problems/non-decreasing-array/<commit_after>
|
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums:
return True
l = len(nums)
fixed = False
for i in xrange(l):
if i + 1 < l:
if nums[i] > nums[i + 1]:
if fixed:
return False
if i + 2 >= l or i == 0:
pass
elif nums[i + 1] >= nums[i - 1]:
pass
else:
nums[i + 1] = nums[i]
fixed = True
return True
|
Add py solution for 665. Non-decreasing Array
665. Non-decreasing Array: https://leetcode.com/problems/non-decreasing-array/class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums:
return True
l = len(nums)
fixed = False
for i in xrange(l):
if i + 1 < l:
if nums[i] > nums[i + 1]:
if fixed:
return False
if i + 2 >= l or i == 0:
pass
elif nums[i + 1] >= nums[i - 1]:
pass
else:
nums[i + 1] = nums[i]
fixed = True
return True
|
<commit_before><commit_msg>Add py solution for 665. Non-decreasing Array
665. Non-decreasing Array: https://leetcode.com/problems/non-decreasing-array/<commit_after>class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if not nums:
return True
l = len(nums)
fixed = False
for i in xrange(l):
if i + 1 < l:
if nums[i] > nums[i + 1]:
if fixed:
return False
if i + 2 >= l or i == 0:
pass
elif nums[i + 1] >= nums[i - 1]:
pass
else:
nums[i + 1] = nums[i]
fixed = True
return True
|
|
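A few hand-checked calls against the Solution class above; the inputs are illustrative, not taken from the problem statement.

# Quick sanity checks for Solution.checkPossibility defined above.
s = Solution()
assert s.checkPossibility([4, 2, 3])         # one fix: lower the 4 or raise the 2
assert not s.checkPossibility([4, 2, 1])     # would need two modifications
assert not s.checkPossibility([3, 4, 2, 3])  # any single fix still leaves a drop
print "all checks passed"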
b1df5d3ad77187d6a2bdf67552268511808ab06b
|
python/test/test_buffer.py
|
python/test/test_buffer.py
|
#!/usr/bin/env python
# coding: utf-8
from nose import main
from nose.tools import *
from msgpack import packb, unpackb
def test_unpack_buffer():
from array import array
buf = array('b')
buf.fromstring(packb(['foo', 'bar']))
obj = unpackb(buf)
assert_equal(['foo', 'bar'], obj)
if __name__ == '__main__':
main()
|
Add test for unpacking buffer object.
|
python: Add test for unpacking buffer object.
|
Python
|
apache-2.0
|
nurse/msgpack-ruby,jen20/msgpack-c,nobu-k/msgpack-c,juanrubio/msgpack-c,soumith/msgpack-c,vashstorm/msgpack-c,AALEKH/msgpack-c,jpetso/msgpack-c,undeadlabs/msgpack-cli,ojundt/msgpack-ruby,kou/msgpack-ruby,kern/msgpack-c,msgpack/msgpack-ruby,okkez/msgpack-ruby,juanrubio/msgpack-c,jpetso/msgpack-c,kpkhxlgy0/msgpack-c,GamePad64/msgpack-c,ojundt/msgpack-ruby,kou/msgpack-ruby,kern/msgpack-c,larskanis/msgpack-ruby,cosmo0920/msgpack-ruby,lvfeng1130/msgpack-c,nobu-k/msgpack-c,soumith/msgpack-c,kpkhxlgy0/msgpack-c,vashstorm/msgpack-c,AALEKH/msgpack-c,okkez/msgpack-ruby,lvfeng1130/msgpack-c,ojundt/msgpack-ruby,jonitis/msgpack-c,kern/msgpack-c,juanrubio/msgpack-c,kou/msgpack-ruby,nurse/msgpack-ruby,kpkhxlgy0/msgpack-c,jpetso/msgpack-c,nobu-k/msgpack-c,soumith/msgpack-c,msgpack/msgpack-ruby,jonitis/msgpack-c,AALEKH/msgpack-c,nurse/msgpack-ruby,kpkhxlgy0/msgpack-c,modulexcite/msgpack-cli,undeadlabs/msgpack-cli,msgpack/msgpack-cli,AALEKH/msgpack-c,msgpack/msgpack-ruby,jonitis/msgpack-c,GamePad64/msgpack-c,jpetso/msgpack-c,larskanis/msgpack-ruby,jpetso/msgpack-c,jen20/msgpack-c,soumith/msgpack-c,okkez/msgpack-ruby,msgpack/msgpack-cli,lvfeng1130/msgpack-c,vashstorm/msgpack-c,soumith/msgpack-c,GamePad64/msgpack-c,AALEKH/msgpack-c,jonitis/msgpack-c,cosmo0920/msgpack-ruby,vashstorm/msgpack-c,jonitis/msgpack-c,larskanis/msgpack-ruby,cosmo0920/msgpack-ruby,juanrubio/msgpack-c,lvfeng1130/msgpack-c,kern/msgpack-c,jen20/msgpack-c,modulexcite/msgpack-cli,nurse/msgpack-ruby,ojundt/msgpack-ruby,GamePad64/msgpack-c,cosmo0920/msgpack-ruby,scopely/msgpack-cli,jen20/msgpack-c,kou/msgpack-ruby,okkez/msgpack-ruby,jen20/msgpack-c,GamePad64/msgpack-c,lvfeng1130/msgpack-c,vashstorm/msgpack-c,kern/msgpack-c,larskanis/msgpack-ruby,juanrubio/msgpack-c,nobu-k/msgpack-c,scopely/msgpack-cli
|
python: Add test for unpacking buffer object.
|
#!/usr/bin/env python
# coding: utf-8
from nose import main
from nose.tools import *
from msgpack import packb, unpackb
def test_unpack_buffer():
from array import array
buf = array('b')
buf.fromstring(packb(['foo', 'bar']))
obj = unpackb(buf)
assert_equal(['foo', 'bar'], obj)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>python: Add test for unpacking buffer object.<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
from nose import main
from nose.tools import *
from msgpack import packb, unpackb
def test_unpack_buffer():
from array import array
buf = array('b')
buf.fromstring(packb(['foo', 'bar']))
obj = unpackb(buf)
assert_equal(['foo', 'bar'], obj)
if __name__ == '__main__':
main()
|
python: Add test for unpacking buffer object.#!/usr/bin/env python
# coding: utf-8
from nose import main
from nose.tools import *
from msgpack import packb, unpackb
def test_unpack_buffer():
from array import array
buf = array('b')
buf.fromstring(packb(['foo', 'bar']))
obj = unpackb(buf)
assert_equal(['foo', 'bar'], obj)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>python: Add test for unpacking buffer object.<commit_after>#!/usr/bin/env python
# coding: utf-8
from nose import main
from nose.tools import *
from msgpack import packb, unpackb
def test_unpack_buffer():
from array import array
buf = array('b')
buf.fromstring(packb(['foo', 'bar']))
obj = unpackb(buf)
assert_equal(['foo', 'bar'], obj)
if __name__ == '__main__':
main()
|
|
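array.fromstring is the Python 2 spelling; on Python 3 the equivalent test would use frombytes. A sketch of the same round trip, with an integer payload chosen to sidestep str/bytes decoding differences between msgpack versions:

# Python 3 variant of the buffer test above (sketch).
from array import array
from msgpack import packb, unpackb

def test_unpack_buffer_py3():
    buf = array('b')
    buf.frombytes(packb([1, 2, 3]))
    assert unpackb(buf) == [1, 2, 3]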
f8bd1a55c37776352d59dcd50363bda1ab3e0932
|
examples/welcome_message.py
|
examples/welcome_message.py
|
"""
instabot example
Workflow:
Welcome message for new followers.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
NOTIFIED_USERS_PATH = 'notified_users.txt'
MESSAGE = 'Thank you for a script, sudoguy!'
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-users', type=str, nargs='?', help='a path to already notified users')
parser.add_argument('-message', type=str, nargs='?', help='message text')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
# Use custom message from args if exist
if args.message:
MESSAGE = args.message
# Check on existed file with notified users
if not bot.check_if_file_exists(NOTIFIED_USERS_PATH):
followers = bot.get_user_followers(bot.user_id)
followers = map(str, followers)
followers_string = '\n'.join(followers)
with open(NOTIFIED_USERS_PATH, 'w') as users_file:
users_file.write(followers_string)
print(
'All followers saved in file {users_path}.\n'
        'Next time, the script will send messages to all new followers.'.format(
users_path=NOTIFIED_USERS_PATH
)
)
exit(0)
notified_users = bot.read_list_from_file(NOTIFIED_USERS_PATH)
print('Read saved list of notified users. Count: {count}'.format(
count=len(notified_users)
))
all_followers = bot.get_user_followers(bot.user_id)
print('Amount of all followers is {count}'.format(
count=len(all_followers)
))
new_followers = set(all_followers) - set(notified_users)
if not new_followers:
print('New followers not found')
exit()
print('Found new followers. Count: {count}'.format(
count=len(new_followers)
))
new_notified_users = []
for follower in tqdm(new_followers):
if bot.send_message(MESSAGE, follower):
new_notified_users.append(follower)
if new_notified_users:
print('Updating notified users list')
with open(NOTIFIED_USERS_PATH, 'a') as fo:
new_notified_users_string = '\n'.join(new_notified_users)
fo.write('\n{users}'.format(
users=new_notified_users_string
))
|
Add new script for welcome messaging
|
Add new script for welcome messaging
|
Python
|
apache-2.0
|
ohld/instabot,instagrambot/instabot,instagrambot/instabot
|
Add new script for welcome messaging
|
"""
instabot example
Workflow:
Welcome message for new followers.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
NOTIFIED_USERS_PATH = 'notified_users.txt'
MESSAGE = 'Thank you for a script, sudoguy!'
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-users', type=str, nargs='?', help='a path to already notified users')
parser.add_argument('-message', type=str, nargs='?', help='message text')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
# Use custom message from args if exist
if args.message:
MESSAGE = args.message
# Check on existed file with notified users
if not bot.check_if_file_exists(NOTIFIED_USERS_PATH):
followers = bot.get_user_followers(bot.user_id)
followers = map(str, followers)
followers_string = '\n'.join(followers)
with open(NOTIFIED_USERS_PATH, 'w') as users_file:
users_file.write(followers_string)
print(
'All followers saved in file {users_path}.\n'
        'Next time, the script will send messages to all new followers.'.format(
users_path=NOTIFIED_USERS_PATH
)
)
exit(0)
notified_users = bot.read_list_from_file(NOTIFIED_USERS_PATH)
print('Read saved list of notified users. Count: {count}'.format(
count=len(notified_users)
))
all_followers = bot.get_user_followers(bot.user_id)
print('Amount of all followers is {count}'.format(
count=len(all_followers)
))
new_followers = set(all_followers) - set(notified_users)
if not new_followers:
print('New followers not found')
exit()
print('Found new followers. Count: {count}'.format(
count=len(new_followers)
))
new_notified_users = []
for follower in tqdm(new_followers):
if bot.send_message(MESSAGE, follower):
new_notified_users.append(follower)
if new_notified_users:
print('Updating notified users list')
with open(NOTIFIED_USERS_PATH, 'a') as fo:
new_notified_users_string = '\n'.join(new_notified_users)
fo.write('\n{users}'.format(
users=new_notified_users_string
))
|
<commit_before><commit_msg>Add new script for welcome messaging<commit_after>
|
"""
instabot example
Workflow:
Welcome message for new followers.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
NOTIFIED_USERS_PATH = 'notified_users.txt'
MESSAGE = 'Thank you for a script, sudoguy!'
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-users', type=str, nargs='?', help='a path to already notified users')
parser.add_argument('-message', type=str, nargs='?', help='message text')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
# Use custom message from args if exist
if args.message:
MESSAGE = args.message
# Check on existed file with notified users
if not bot.check_if_file_exists(NOTIFIED_USERS_PATH):
followers = bot.get_user_followers(bot.user_id)
followers = map(str, followers)
followers_string = '\n'.join(followers)
with open(NOTIFIED_USERS_PATH, 'w') as users_file:
users_file.write(followers_string)
print(
'All followers saved in file {users_path}.\n'
        'Next time, the script will send messages to all new followers.'.format(
users_path=NOTIFIED_USERS_PATH
)
)
exit(0)
notified_users = bot.read_list_from_file(NOTIFIED_USERS_PATH)
print('Read saved list of notified users. Count: {count}'.format(
count=len(notified_users)
))
all_followers = bot.get_user_followers(bot.user_id)
print('Amount of all followers is {count}'.format(
count=len(all_followers)
))
new_followers = set(all_followers) - set(notified_users)
if not new_followers:
print('New followers not found')
exit()
print('Found new followers. Count: {count}'.format(
count=len(new_followers)
))
new_notified_users = []
for follower in tqdm(new_followers):
if bot.send_message(MESSAGE, follower):
new_notified_users.append(follower)
if new_notified_users:
print('Updating notified users list')
with open(NOTIFIED_USERS_PATH, 'a') as fo:
new_notified_users_string = '\n'.join(new_notified_users)
fo.write('\n{users}'.format(
users=new_notified_users_string
))
|
Add new script for welcome messaging"""
instabot example
Workflow:
Welcome message for new followers.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
NOTIFIED_USERS_PATH = 'notified_users.txt'
MESSAGE = 'Thank you for a script, sudoguy!'
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-users', type=str, nargs='?', help='a path to already notified users')
parser.add_argument('-message', type=str, nargs='?', help='message text')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
# Use custom message from args if exist
if args.message:
MESSAGE = args.message
# Check on existed file with notified users
if not bot.check_if_file_exists(NOTIFIED_USERS_PATH):
followers = bot.get_user_followers(bot.user_id)
followers = map(str, followers)
followers_string = '\n'.join(followers)
with open(NOTIFIED_USERS_PATH, 'w') as users_file:
users_file.write(followers_string)
print(
'All followers saved in file {users_path}.\n'
        'Next time, the script will send messages to all new followers.'.format(
users_path=NOTIFIED_USERS_PATH
)
)
exit(0)
notified_users = bot.read_list_from_file(NOTIFIED_USERS_PATH)
print('Read saved list of notified users. Count: {count}'.format(
count=len(notified_users)
))
all_followers = bot.get_user_followers(bot.user_id)
print('Amount of all followers is {count}'.format(
count=len(all_followers)
))
new_followers = set(all_followers) - set(notified_users)
if not new_followers:
print('New followers not found')
exit()
print('Found new followers. Count: {count}'.format(
count=len(new_followers)
))
new_notified_users = []
for follower in tqdm(new_followers):
if bot.send_message(MESSAGE, follower):
new_notified_users.append(follower)
if new_notified_users:
print('Updating notified users list')
with open(NOTIFIED_USERS_PATH, 'a') as fo:
new_notified_users_string = '\n'.join(new_notified_users)
fo.write('\n{users}'.format(
users=new_notified_users_string
))
|
<commit_before><commit_msg>Add new script for welcome messaging<commit_after>"""
instabot example
Workflow:
Welcome message for new followers.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
NOTIFIED_USERS_PATH = 'notified_users.txt'
MESSAGE = 'Thank you for a script, sudoguy!'
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-users', type=str, nargs='?', help='a path to already notified users')
parser.add_argument('-message', type=str, nargs='?', help='message text')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
# Use custom message from args if exist
if args.message:
MESSAGE = args.message
# Check on existed file with notified users
if not bot.check_if_file_exists(NOTIFIED_USERS_PATH):
followers = bot.get_user_followers(bot.user_id)
followers = map(str, followers)
followers_string = '\n'.join(followers)
with open(NOTIFIED_USERS_PATH, 'w') as users_file:
users_file.write(followers_string)
print(
'All followers saved in file {users_path}.\n'
        'Next time, the script will send messages to all new followers.'.format(
users_path=NOTIFIED_USERS_PATH
)
)
exit(0)
notified_users = bot.read_list_from_file(NOTIFIED_USERS_PATH)
print('Read saved list of notified users. Count: {count}'.format(
count=len(notified_users)
))
all_followers = bot.get_user_followers(bot.user_id)
print('Amount of all followers is {count}'.format(
count=len(all_followers)
))
new_followers = set(all_followers) - set(notified_users)
if not new_followers:
print('New followers not found')
exit()
print('Found new followers. Count: {count}'.format(
count=len(new_followers)
))
new_notified_users = []
for follower in tqdm(new_followers):
if bot.send_message(MESSAGE, follower):
new_notified_users.append(follower)
if new_notified_users:
print('Updating notified users list')
with open(NOTIFIED_USERS_PATH, 'a') as fo:
new_notified_users_string = '\n'.join(new_notified_users)
fo.write('\n{users}'.format(
users=new_notified_users_string
))
|
|
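For reference, a typical invocation of welcome_message.py; the flag names come from its argparse definitions above, and the credentials are placeholders. Note that -users is parsed but the notified-users path is currently hard-coded to notified_users.txt.

# Example invocation (sketch):
#
#   python examples/welcome_message.py -u my_login -p my_password \
#       -message "Thanks for the follow!"
#
# The first run only records existing followers; later runs message new ones.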
062d44d75e3fa7d55e620e6ce4c4af336c5b6917
|
recipes/skia_buildbot.py
|
recipes/skia_buildbot.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class SkiaBuildbot(recipe_util.Recipe):
"""Basic Recipe class for the Skia Buildbot repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'name' : 'buildbot',
'url' : 'https://skia.googlesource.com/buildbot.git',
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'buildbot'
def main(argv=None):
return SkiaBuildbot().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add fetch recipe for Skia's Buildbot repository.
|
Add fetch recipe for Skia's Buildbot repository.
Tested with the following command lines:
$ cd
$ mkdir skia_test
$ cd skia_test
$ fetch skia_buildbot
$ cd skia_buildbot
# confirm the repo is what one would expect.
BUG=None
TEST=see above
R=agable@chromium.org
Review URL: https://codereview.chromium.org/777513002
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@293283 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
Python
|
bsd-3-clause
|
svn2github/chromium-depot-tools,svn2github/chromium-depot-tools,svn2github/chromium-depot-tools
|
Add fetch recipe for Skia's Buildbot repository.
Tested with the following command lines:
$ cd
$ mkdir skia_test
$ cd skia_test
$ fetch skia_buildbot
$ cd skia_buildbot
# confirm the repo is what one would expect.
BUG=None
TEST=see above
R=agable@chromium.org
Review URL: https://codereview.chromium.org/777513002
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@293283 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class SkiaBuildbot(recipe_util.Recipe):
"""Basic Recipe class for the Skia Buildbot repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'name' : 'buildbot',
'url' : 'https://skia.googlesource.com/buildbot.git',
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'buildbot'
def main(argv=None):
return SkiaBuildbot().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add fetch recipe for Skia's Buildbot repository.
Tested with the following command lines:
$ cd
$ mkdir skia_test
$ cd skia_test
$ fetch skia_buildbot
$ cd skia_buildbot
# confirm the repo is what one would expect.
BUG=None
TEST=see above
R=agable@chromium.org
Review URL: https://codereview.chromium.org/777513002
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@293283 4ff67af0-8c30-449e-8e8b-ad334ec8d88c<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class SkiaBuildbot(recipe_util.Recipe):
"""Basic Recipe class for the Skia Buildbot repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'name' : 'buildbot',
'url' : 'https://skia.googlesource.com/buildbot.git',
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'buildbot'
def main(argv=None):
return SkiaBuildbot().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add fetch recipe for Skia's Buildbot repository.
Tested with the following command lines:
$ cd
$ mkdir skia_test
$ cd skia_test
$ fetch skia_buildbot
$ cd skia_buildbot
# confirm the repo is what one would expect.
BUG=None
TEST=see above
R=agable@chromium.org
Review URL: https://codereview.chromium.org/777513002
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@293283 4ff67af0-8c30-449e-8e8b-ad334ec8d88c# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class SkiaBuildbot(recipe_util.Recipe):
"""Basic Recipe class for the Skia Buildbot repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'name' : 'buildbot',
'url' : 'https://skia.googlesource.com/buildbot.git',
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'buildbot'
def main(argv=None):
return SkiaBuildbot().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add fetch recipe for Skia's Buildbot repository.
Tested with the following command lines:
$ cd
$ mkdir skia_test
$ cd skia_test
$ fetch skia_buildbot
$ cd skia_buildbot
# confirm the repo is what one would expect.
BUG=None
TEST=see above
R=agable@chromium.org
Review URL: https://codereview.chromium.org/777513002
git-svn-id: bd64dd6fa6f3f0ed0c0666d1018379882b742947@293283 4ff67af0-8c30-449e-8e8b-ad334ec8d88c<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class SkiaBuildbot(recipe_util.Recipe):
"""Basic Recipe class for the Skia Buildbot repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'name' : 'buildbot',
'url' : 'https://skia.googlesource.com/buildbot.git',
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'buildbot'
def main(argv=None):
return SkiaBuildbot().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
e4d6b055be724f1ab2614fe68da8a09dfcbaa2d5
|
tests/test_tags_functional.py
|
tests/test_tags_functional.py
|
"""Test the dataset tag functionality."""
import pytest
from . import tmp_dir_fixture
def test_tags_functional(tmp_dir_fixture):
from dtoolcore import DataSetCreator, DataSet
with DataSetCreator(name="empty-test-ds", base_uri=tmp_dir_fixture) as c:
# Test put on proto dataset.
c.put_tag("testing")
    dataset = DataSet.from_uri(c.uri)  # assumes DataSetCreator exposes the new dataset's URI as .uri
assert dataset.get_tags() == []
dataset.put_tag("amazing")
dataset.put_tag("stuff")
assert dataset.get_tags() == ["amazing", "stuff"]
c.delete_tag("stuff")
assert dataset.get_tags() == ["amazing"]
# Tags can only be strings.
with pytest.raises(ValueError):
dataset.put_tag(1)
# Tags need to adhere to the utils.name_is_valid() rules.
from dtoolcore import DtoolCoreInvalidNameError
with pytest.raises(DtoolCoreInvalidNameError):
dataset.put_tag("!invalid")
|
Add failing functional test for tags
|
Add failing functional test for tags
|
Python
|
mit
|
JIC-CSB/dtoolcore
|
Add failing functional test for tags
|
"""Test the dataset tag functionality."""
import pytest
from . import tmp_dir_fixture
def test_tags_functional(tmp_dir_fixture):
from dtoolcore import DataSetCreator, DataSet
with DataSetCreator(name="empty-test-ds", base_uri=tmp_dir_fixture) as c:
# Test put on proto dataset.
c.put_tag("testing")
    dataset = DataSet.from_uri(c.uri)  # assumes DataSetCreator exposes the new dataset's URI as .uri
assert dataset.get_tags() == []
dataset.put_tag("amazing")
dataset.put_tag("stuff")
assert dataset.get_tags() == ["amazing", "stuff"]
c.delete_tag("stuff")
assert dataset.get_tags() == ["amazing"]
# Tags can only be strings.
with pytest.raises(ValueError):
dataset.put_tag(1)
# Tags need to adhere to the utils.name_is_valid() rules.
from dtoolcore import DtoolCoreInvalidNameError
with pytest.raises(DtoolCoreInvalidNameError):
dataset.put_tag("!invalid")
|
<commit_before><commit_msg>Add failing functional test for tags<commit_after>
|
"""Test the dataset tag functionality."""
import pytest
from . import tmp_dir_fixture
def test_tags_functional(tmp_dir_fixture):
from dtoolcore import DataSetCreator, DataSet
with DataSetCreator(name="empty-test-ds", base_uri=tmp_dir_fixture) as c:
# Test put on proto dataset.
c.put_tag("testing")
    dataset = DataSet.from_uri(c.uri)  # assumes DataSetCreator exposes the new dataset's URI as .uri
assert dataset.get_tags() == []
dataset.put_tag("amazing")
dataset.put_tag("stuff")
assert dataset.get_tags() == ["amazing", "stuff"]
c.delete_tag("stuff")
assert dataset.get_tags() == ["amazing"]
# Tags can only be strings.
with pytest.raises(ValueError):
dataset.put_tag(1)
# Tags need to adhere to the utils.name_is_valid() rules.
from dtoolcore import DtoolCoreInvalidNameError
with pytest.raises(DtoolCoreInvalidNameError):
dataset.put_tag("!invalid")
|
Add failing functional test for tags"""Test the dataset tag functionality."""
import pytest
from . import tmp_dir_fixture
def test_tags_functional(tmp_dir_fixture):
from dtoolcore import DataSetCreator, DataSet
with DataSetCreator(name="empty-test-ds", base_uri=tmp_dir_fixture) as c:
# Test put on proto dataset.
c.put_tag("testing")
    dataset = DataSet.from_uri(c.uri)  # assumes DataSetCreator exposes the new dataset's URI as .uri
assert dataset.get_tags() == []
dataset.put_tag("amazing")
dataset.put_tag("stuff")
assert dataset.get_tags() == ["amazing", "stuff"]
c.delete_tag("stuff")
assert dataset.get_tags() == ["amazing"]
# Tags can only be strings.
with pytest.raises(ValueError):
dataset.put_tag(1)
# Tags need to adhere to the utils.name_is_valid() rules.
from dtoolcore import DtoolCoreInvalidNameError
with pytest.raises(DtoolCoreInvalidNameError):
dataset.put_tag("!invalid")
|
<commit_before><commit_msg>Add failing functional test for tags<commit_after>"""Test the dataset tag functionality."""
import pytest
from . import tmp_dir_fixture
def test_tags_functional(tmp_dir_fixture):
from dtoolcore import DataSetCreator, DataSet
with DataSetCreator(name="empty-test-ds", base_uri=tmp_dir_fixture) as c:
# Test put on proto dataset.
c.put_tag("testing")
    dataset = DataSet.from_uri(c.uri)  # assumes DataSetCreator exposes the new dataset's URI as .uri
assert dataset.get_tags() == []
dataset.put_tag("amazing")
dataset.put_tag("stuff")
assert dataset.get_tags() == ["amazing", "stuff"]
c.delete_tag("stuff")
assert dataset.get_tags() == ["amazing"]
# Tags can only be strings.
with pytest.raises(ValueError):
dataset.put_tag(1)
# Tags need to adhere to the utils.name_is_valid() rules.
from dtoolcore import DtoolCoreInvalidNameError
with pytest.raises(DtoolCoreInvalidNameError):
dataset.put_tag("!invalid")
|
|
5263d39639f1596ee1b0cdf26437b845b6dd1661
|
app.py
|
app.py
|
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import qrcode
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=20,
border=4,
)
qr.add_data('ASDFASDFASDFASDFtest')
qr.make(fit=True)
fnt = ImageFont.truetype("FreeMono.ttf", 16)
base = qr.make_image()
txt = Image.new('RGB', base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text((10,10), "Hello", font=fnt, fill=(255,255,255,128))
# draw text, full opacity
d.text((10,60), "World", font=fnt, fill=(255,255,255,255))
out = Image.composite(base, txt, 1)
out.show()
|
Test qrcode and PIL libraries
|
Test qrcode and PIL libraries
|
Python
|
mit
|
Nslaver/GeoPQRGen
|
Test qrcode and PIL libraries
|
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import qrcode
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=20,
border=4,
)
qr.add_data('ASDFASDFASDFASDFtest')
qr.make(fit=True)
fnt = ImageFont.truetype("FreeMono.ttf", 16)
base = qr.make_image()
txt = Image.new('RGB', base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text((10,10), "Hello", font=fnt, fill=(255,255,255,128))
# draw text, full opacity
d.text((10,60), "World", font=fnt, fill=(255,255,255,255))
out = Image.composite(base, txt, 1)
out.show()
|
<commit_before><commit_msg>Test qrcode and PIL libraries<commit_after>
|
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import qrcode
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=20,
border=4,
)
qr.add_data('ASDFASDFASDFASDFtest')
qr.make(fit=True)
fnt = ImageFont.truetype("FreeMono.ttf", 16)
base = qr.make_image()
txt = Image.new('RGB', base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text((10,10), "Hello", font=fnt, fill=(255,255,255,128))
# draw text, full opacity
d.text((10,60), "World", font=fnt, fill=(255,255,255,255))
out = Image.composite(base, txt, 1)
out.show()
|
Test qrcode and PIL librariesfrom PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import qrcode
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=20,
border=4,
)
qr.add_data('ASDFASDFASDFASDFtest')
qr.make(fit=True)
fnt = ImageFont.truetype("FreeMono.ttf", 16)
base = qr.make_image()
txt = Image.new('RGB', base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text((10,10), "Hello", font=fnt, fill=(255,255,255,128))
# draw text, full opacity
d.text((10,60), "World", font=fnt, fill=(255,255,255,255))
out = Image.composite(base, txt, 1)
out.show()
|
<commit_before><commit_msg>Test qrcode and PIL libraries<commit_after>from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import qrcode
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=20,
border=4,
)
qr.add_data('ASDFASDFASDFASDFtest')
qr.make(fit=True)
fnt = ImageFont.truetype("FreeMono.ttf", 16)
base = qr.make_image()
txt = Image.new('RGB', base.size, (255,255,255,0))
# get a font
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
# get a drawing context
d = ImageDraw.Draw(txt)
# draw text, half opacity
d.text((10,10), "Hello", font=fnt, fill=(255,255,255,128))
# draw text, full opacity
d.text((10,60), "World", font=fnt, fill=(255,255,255,255))
out = Image.composite(base, txt, 1)
out.show()
|
|
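Two caveats about the experiment above: Image.composite expects a mask image as its third argument, so passing 1 will fail, and white text drawn on the mostly white QR image is invisible. A hedged sketch of one way to caption the generated code; the caption text and output filename are illustrative:

from PIL import Image, ImageDraw, ImageFont
import qrcode

qr_img = qrcode.make('ASDFASDFASDFASDFtest').convert('RGB')
# Paste the QR code onto a slightly taller canvas and draw the caption below it.
canvas = Image.new('RGB', (qr_img.width, qr_img.height + 40), 'white')
canvas.paste(qr_img, (0, 0))
draw = ImageDraw.Draw(canvas)
font = ImageFont.load_default()  # avoids depending on a hard-coded .ttf path
draw.text((10, qr_img.height + 10), 'Hello World', font=font, fill='black')
canvas.save('qr_with_caption.png')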
8b9730dce9ed50d764b86804609c17235044bbfd
|
heapsort.py
|
heapsort.py
|
__author__ = 'harsh'
LT = 0
GT = 1
def compare_ele(comp_type, ele1, ele2):
if comp_type == LT:
return ele1 < ele2
elif comp_type == GT:
return ele1 > ele2
raise Exception("Compare type Undefined")
def heapsort(lst, comp_type=LT):
"""
:param lst:
"""
#heapify
for start in range((len(lst)-2)/2, -1, -1):
siftdown_min(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
lst[end], lst[0] = lst[0], lst[end]
siftdown_min(lst, 0, end-1)
return lst
def siftdown(lst, start, end, comp_type):
root = start
while True:
child = root * 2 + 1
if child > end:
break
        if child + 1 <= end and compare_ele(comp_type, lst[child], lst[child + 1]):
            child += 1
        # comp_type is passed first to match compare_ele's signature; the original
        # ~comp_type (bitwise NOT of 0 or 1) could never equal LT or GT.
        if compare_ele(comp_type, lst[root], lst[child]):
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def siftdown_min(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end:
break
if child + 1 <= end and lst[child] > lst[child + 1]:
child += 1
if lst[root] > lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def main():
arr = [7, 5, 3, 5, 2, 5, 3, 0]
sorted_lst = heapsort(arr)
print sorted_lst
main()
|
Add min and max heap sort
|
Add min and max heap sort
|
Python
|
mit
|
hs634/algorithms,hs634/algorithms
|
Add min and max heap sort
|
__author__ = 'harsh'
LT = 0
GT = 1
def compare_ele(comp_type, ele1, ele2):
if comp_type == LT:
return ele1 < ele2
elif comp_type == GT:
return ele1 > ele2
raise Exception("Compare type Undefined")
def heapsort(lst, comp_type=LT):
"""
:param lst:
"""
#heapify
for start in range((len(lst)-2)/2, -1, -1):
siftdown_min(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
lst[end], lst[0] = lst[0], lst[end]
siftdown_min(lst, 0, end-1)
return lst
def siftdown(lst, start, end, comp_type):
root = start
while True:
child = root * 2 + 1
if child > end:
break
        if child + 1 <= end and compare_ele(comp_type, lst[child], lst[child + 1]):
            child += 1
        # comp_type is passed first to match compare_ele's signature; the original
        # ~comp_type (bitwise NOT of 0 or 1) could never equal LT or GT.
        if compare_ele(comp_type, lst[root], lst[child]):
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def siftdown_min(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end:
break
if child + 1 <= end and lst[child] > lst[child + 1]:
child += 1
if lst[root] > lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def main():
arr = [7, 5, 3, 5, 2, 5, 3, 0]
sorted_lst = heapsort(arr)
print sorted_lst
main()
|
<commit_before><commit_msg>Add min and max heap sort<commit_after>
|
__author__ = 'harsh'
LT = 0
GT = 1
def compare_ele(comp_type, ele1, ele2):
if comp_type == LT:
return ele1 < ele2
elif comp_type == GT:
return ele1 > ele2
raise Exception("Compare type Undefined")
def heapsort(lst, comp_type=LT):
"""
:param lst:
"""
#heapify
for start in range((len(lst)-2)/2, -1, -1):
siftdown_min(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
lst[end], lst[0] = lst[0], lst[end]
siftdown_min(lst, 0, end-1)
return lst
def siftdown(lst, start, end, comp_type):
root = start
while True:
child = root * 2 + 1
if child > end:
break
        if child + 1 <= end and compare_ele(comp_type, lst[child], lst[child + 1]):
            child += 1
        # comp_type is passed first to match compare_ele's signature; the original
        # ~comp_type (bitwise NOT of 0 or 1) could never equal LT or GT.
        if compare_ele(comp_type, lst[root], lst[child]):
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def siftdown_min(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end:
break
if child + 1 <= end and lst[child] > lst[child + 1]:
child += 1
if lst[root] > lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def main():
arr = [7, 5, 3, 5, 2, 5, 3, 0]
sorted_lst = heapsort(arr)
print sorted_lst
main()
|
Add min and max heap sort__author__ = 'harsh'
LT = 0
GT = 1
def compare_ele(comp_type, ele1, ele2):
if comp_type == LT:
return ele1 < ele2
elif comp_type == GT:
return ele1 > ele2
raise Exception("Compare type Undefined")
def heapsort(lst, comp_type=LT):
"""
:param lst:
"""
#heapify
for start in range((len(lst)-2)/2, -1, -1):
siftdown_min(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
lst[end], lst[0] = lst[0], lst[end]
siftdown_min(lst, 0, end-1)
return lst
def siftdown(lst, start, end, comp_type):
root = start
while True:
child = root * 2 + 1
if child > end:
break
        if child + 1 <= end and compare_ele(comp_type, lst[child], lst[child + 1]):
            child += 1
        # comp_type is passed first to match compare_ele's signature; the original
        # ~comp_type (bitwise NOT of 0 or 1) could never equal LT or GT.
        if compare_ele(comp_type, lst[root], lst[child]):
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def siftdown_min(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end:
break
if child + 1 <= end and lst[child] > lst[child + 1]:
child += 1
if lst[root] > lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def main():
arr = [7, 5, 3, 5, 2, 5, 3, 0]
sorted_lst = heapsort(arr)
print sorted_lst
main()
|
<commit_before><commit_msg>Add min and max heap sort<commit_after>__author__ = 'harsh'
LT = 0
GT = 1
def compare_ele(comp_type, ele1, ele2):
if comp_type == LT:
return ele1 < ele2
elif comp_type == GT:
return ele1 > ele2
raise Exception("Compare type Undefined")
def heapsort(lst, comp_type=LT):
"""
:param lst:
"""
#heapify
for start in range((len(lst)-2)/2, -1, -1):
siftdown_min(lst, start, len(lst)-1)
for end in range(len(lst)-1, 0, -1):
lst[end], lst[0] = lst[0], lst[end]
siftdown_min(lst, 0, end-1)
return lst
def siftdown(lst, start, end, comp_type):
root = start
while True:
child = root * 2 + 1
if child > end:
break
        if child + 1 <= end and compare_ele(comp_type, lst[child], lst[child + 1]):
            child += 1
        # comp_type is passed first to match compare_ele's signature; the original
        # ~comp_type (bitwise NOT of 0 or 1) could never equal LT or GT.
        if compare_ele(comp_type, lst[root], lst[child]):
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def siftdown_min(lst, start, end):
root = start
while True:
child = root * 2 + 1
if child > end:
break
if child + 1 <= end and lst[child] > lst[child + 1]:
child += 1
if lst[root] > lst[child]:
lst[root], lst[child] = lst[child], lst[root]
root = child
else:
break
def main():
arr = [7, 5, 3, 5, 2, 5, 3, 0]
sorted_lst = heapsort(arr)
print sorted_lst
main()
|
|
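As committed, heapsort always calls siftdown_min and ignores its comp_type argument. A sketch of a variant that threads the comparison through the generic siftdown (it relies on LT, GT, compare_ele and the corrected siftdown above):

def heapsort_generic(lst, comp_type=LT):
    # LT builds a max-heap and yields ascending output; GT yields descending.
    for start in range((len(lst) - 2) // 2, -1, -1):
        siftdown(lst, start, len(lst) - 1, comp_type)
    for end in range(len(lst) - 1, 0, -1):
        lst[end], lst[0] = lst[0], lst[end]
        siftdown(lst, 0, end - 1, comp_type)
    return lst

print heapsort_generic([7, 5, 3, 5, 2, 5, 3, 0], LT)  # [0, 2, 3, 3, 5, 5, 5, 7]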
71d33cf6215a81ee2db70ae9fd214aa725cde401
|
hybrid-mixed/full-hybrid.py
|
hybrid-mixed/full-hybrid.py
|
from firedrake import *
n = 4
mesh = UnitSquareMesh(n, n)
n = FacetNormal(mesh)
RTd = FunctionSpace(mesh, BrokenElement(FiniteElement("RT", triangle, 1)))
DG = FunctionSpace(mesh, "DG", 0)
T = FunctionSpace(mesh, "HDiv Trace", 0)
Wd = RTd * DG * T
sigma, u, lambdar = TrialFunctions(Wd)
tau, v, gammar = TestFunctions(Wd)
bcs = DirichletBC(Wd.sub(2), Constant(0.0), "on_boundary")
adx = (dot(sigma, tau) - div(tau)*u + div(sigma)*v + u*v)*dx
adS = (jump(sigma, n=n)*gammar('+') + jump(tau, n=n)*lambdar('+'))*dS
a = adx + adS
f = Function(DG)
f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
L = v*f*dx
w = Function(Wd)
params = {'mat_type': 'matfree',
'ksp_type': 'gmres',
'pc_type': 'python',
'ksp_monitor': True,
'pc_python_type': 'firedrake.HybridStaticCondensationPC',
'hybrid_sc': {'ksp_type': 'preonly',
'pc_type': 'lu'}}
solve(a == L, w, bcs=bcs, solver_parameters=params)
sigma_h, u_h, lambdar_h = w.split()
File("hybrid-mixed-test.pvd").write(sigma_h, u_h)
|
Test new static condensation pc on full mixed-hybrid system
|
Test new static condensation pc on full mixed-hybrid system
|
Python
|
mit
|
thomasgibson/tabula-rasa
|
Test new static condensation pc on full mixed-hybrid system
|
from firedrake import *
n = 4
mesh = UnitSquareMesh(n, n)
n = FacetNormal(mesh)
RTd = FunctionSpace(mesh, BrokenElement(FiniteElement("RT", triangle, 1)))
DG = FunctionSpace(mesh, "DG", 0)
T = FunctionSpace(mesh, "HDiv Trace", 0)
Wd = RTd * DG * T
sigma, u, lambdar = TrialFunctions(Wd)
tau, v, gammar = TestFunctions(Wd)
bcs = DirichletBC(Wd.sub(2), Constant(0.0), "on_boundary")
adx = (dot(sigma, tau) - div(tau)*u + div(sigma)*v + u*v)*dx
adS = (jump(sigma, n=n)*gammar('+') + jump(tau, n=n)*lambdar('+'))*dS
a = adx + adS
f = Function(DG)
f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
L = v*f*dx
w = Function(Wd)
params = {'mat_type': 'matfree',
'ksp_type': 'gmres',
'pc_type': 'python',
'ksp_monitor': True,
'pc_python_type': 'firedrake.HybridStaticCondensationPC',
'hybrid_sc': {'ksp_type': 'preonly',
'pc_type': 'lu'}}
solve(a == L, w, bcs=bcs, solver_parameters=params)
sigma_h, u_h, lambdar_h = w.split()
File("hybrid-mixed-test.pvd").write(sigma_h, u_h)
|
<commit_before><commit_msg>Test new static condensation pc on full mixed-hybrid system<commit_after>
|
from firedrake import *
n = 4
mesh = UnitSquareMesh(n, n)
n = FacetNormal(mesh)
RTd = FunctionSpace(mesh, BrokenElement(FiniteElement("RT", triangle, 1)))
DG = FunctionSpace(mesh, "DG", 0)
T = FunctionSpace(mesh, "HDiv Trace", 0)
Wd = RTd * DG * T
sigma, u, lambdar = TrialFunctions(Wd)
tau, v, gammar = TestFunctions(Wd)
bcs = DirichletBC(Wd.sub(2), Constant(0.0), "on_boundary")
adx = (dot(sigma, tau) - div(tau)*u + div(sigma)*v + u*v)*dx
adS = (jump(sigma, n=n)*gammar('+') + jump(tau, n=n)*lambdar('+'))*dS
a = adx + adS
f = Function(DG)
f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
L = v*f*dx
w = Function(Wd)
params = {'mat_type': 'matfree',
'ksp_type': 'gmres',
'pc_type': 'python',
'ksp_monitor': True,
'pc_python_type': 'firedrake.HybridStaticCondensationPC',
'hybrid_sc': {'ksp_type': 'preonly',
'pc_type': 'lu'}}
solve(a == L, w, bcs=bcs, solver_parameters=params)
sigma_h, u_h, lambdar_h = w.split()
File("hybrid-mixed-test.pvd").write(sigma_h, u_h)
|
Test new static condensation pc on full mixed-hybrid systemfrom firedrake import *
n = 4
mesh = UnitSquareMesh(n, n)
n = FacetNormal(mesh)
RTd = FunctionSpace(mesh, BrokenElement(FiniteElement("RT", triangle, 1)))
DG = FunctionSpace(mesh, "DG", 0)
T = FunctionSpace(mesh, "HDiv Trace", 0)
Wd = RTd * DG * T
sigma, u, lambdar = TrialFunctions(Wd)
tau, v, gammar = TestFunctions(Wd)
bcs = DirichletBC(Wd.sub(2), Constant(0.0), "on_boundary")
adx = (dot(sigma, tau) - div(tau)*u + div(sigma)*v + u*v)*dx
adS = (jump(sigma, n=n)*gammar('+') + jump(tau, n=n)*lambdar('+'))*dS
a = adx + adS
f = Function(DG)
f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
L = v*f*dx
w = Function(Wd)
params = {'mat_type': 'matfree',
'ksp_type': 'gmres',
'pc_type': 'python',
'ksp_monitor': True,
'pc_python_type': 'firedrake.HybridStaticCondensationPC',
'hybrid_sc': {'ksp_type': 'preonly',
'pc_type': 'lu'}}
solve(a == L, w, bcs=bcs, solver_parameters=params)
sigma_h, u_h, lambdar_h = w.split()
File("hybrid-mixed-test.pvd").write(sigma_h, u_h)
|
<commit_before><commit_msg>Test new static condensation pc on full mixed-hybrid system<commit_after>from firedrake import *
n = 4
mesh = UnitSquareMesh(n, n)
n = FacetNormal(mesh)
RTd = FunctionSpace(mesh, BrokenElement(FiniteElement("RT", triangle, 1)))
DG = FunctionSpace(mesh, "DG", 0)
T = FunctionSpace(mesh, "HDiv Trace", 0)
Wd = RTd * DG * T
sigma, u, lambdar = TrialFunctions(Wd)
tau, v, gammar = TestFunctions(Wd)
bcs = DirichletBC(Wd.sub(2), Constant(0.0), "on_boundary")
adx = (dot(sigma, tau) - div(tau)*u + div(sigma)*v + u*v)*dx
adS = (jump(sigma, n=n)*gammar('+') + jump(tau, n=n)*lambdar('+'))*dS
a = adx + adS
f = Function(DG)
f.interpolate(Expression("(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2)"))
L = v*f*dx
w = Function(Wd)
params = {'mat_type': 'matfree',
'ksp_type': 'gmres',
'pc_type': 'python',
'ksp_monitor': True,
'pc_python_type': 'firedrake.HybridStaticCondensationPC',
'hybrid_sc': {'ksp_type': 'preonly',
'pc_type': 'lu'}}
solve(a == L, w, bcs=bcs, solver_parameters=params)
sigma_h, u_h, lambdar_h = w.split()
File("hybrid-mixed-test.pvd").write(sigma_h, u_h)
|
|
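The forcing term above corresponds to the manufactured solution u = sin(2*pi*x)*sin(2*pi*y) of -div(grad u) + u = f with u = 0 on the boundary, so an L2 error check can be appended after the solve. A sketch:

# Optional accuracy check against the manufactured solution (sketch).
import math
x, y = SpatialCoordinate(mesh)
u_exact = sin(2*math.pi*x) * sin(2*math.pi*y)
l2_error = assemble((u_h - u_exact)**2 * dx) ** 0.5
print("L2 error in u: %g" % l2_error)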
2cae59076f2fa32b7284bfe4b8061aa80b4da2a9
|
openprescribing/matrixstore/tests/contextmanagers.py
|
openprescribing/matrixstore/tests/contextmanagers.py
|
from contextlib import contextmanager
from matrixstore.tests.matrixstore_factory import (
matrixstore_from_data_factory,
patch_global_matrixstore,
)
@contextmanager
def patched_global_matrixstore(matrixstore):
"""Context manaager that patches the global MatrixStore instance with the supplied
matrixstore.
"""
stop_patching = patch_global_matrixstore(matrixstore)
try:
yield
finally:
stop_patching()
@contextmanager
def patched_global_matrixstore_from_data_factory(factory):
"""Context manaager that patches the global MatrixStore instance with one built from
the supplied factory.
"""
matrixstore = matrixstore_from_data_factory(factory)
with patched_global_matrixstore(matrixstore):
yield
|
Add context managers for patching global matrixstore in tests
|
Add context managers for patching global matrixstore in tests
|
Python
|
mit
|
ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc
|
Add context managers for patching global matrixstore in tests
|
from contextlib import contextmanager
from matrixstore.tests.matrixstore_factory import (
matrixstore_from_data_factory,
patch_global_matrixstore,
)
@contextmanager
def patched_global_matrixstore(matrixstore):
"""Context manaager that patches the global MatrixStore instance with the supplied
matrixstore.
"""
stop_patching = patch_global_matrixstore(matrixstore)
try:
yield
finally:
stop_patching()
@contextmanager
def patched_global_matrixstore_from_data_factory(factory):
"""Context manaager that patches the global MatrixStore instance with one built from
the supplied factory.
"""
matrixstore = matrixstore_from_data_factory(factory)
with patched_global_matrixstore(matrixstore):
yield
|
<commit_before><commit_msg>Add context managers for patching global matrixstore in tests<commit_after>
|
from contextlib import contextmanager
from matrixstore.tests.matrixstore_factory import (
matrixstore_from_data_factory,
patch_global_matrixstore,
)
@contextmanager
def patched_global_matrixstore(matrixstore):
"""Context manaager that patches the global MatrixStore instance with the supplied
matrixstore.
"""
stop_patching = patch_global_matrixstore(matrixstore)
try:
yield
finally:
stop_patching()
@contextmanager
def patched_global_matrixstore_from_data_factory(factory):
"""Context manaager that patches the global MatrixStore instance with one built from
the supplied factory.
"""
matrixstore = matrixstore_from_data_factory(factory)
with patched_global_matrixstore(matrixstore):
yield
|
Add context managers for patching global matrixstore in testsfrom contextlib import contextmanager
from matrixstore.tests.matrixstore_factory import (
matrixstore_from_data_factory,
patch_global_matrixstore,
)
@contextmanager
def patched_global_matrixstore(matrixstore):
"""Context manaager that patches the global MatrixStore instance with the supplied
matrixstore.
"""
stop_patching = patch_global_matrixstore(matrixstore)
try:
yield
finally:
stop_patching()
@contextmanager
def patched_global_matrixstore_from_data_factory(factory):
"""Context manaager that patches the global MatrixStore instance with one built from
the supplied factory.
"""
matrixstore = matrixstore_from_data_factory(factory)
with patched_global_matrixstore(matrixstore):
yield
|
<commit_before><commit_msg>Add context managers for patching global matrixstore in tests<commit_after>from contextlib import contextmanager
from matrixstore.tests.matrixstore_factory import (
matrixstore_from_data_factory,
patch_global_matrixstore,
)
@contextmanager
def patched_global_matrixstore(matrixstore):
"""Context manaager that patches the global MatrixStore instance with the supplied
matrixstore.
"""
stop_patching = patch_global_matrixstore(matrixstore)
try:
yield
finally:
stop_patching()
@contextmanager
def patched_global_matrixstore_from_data_factory(factory):
"""Context manaager that patches the global MatrixStore instance with one built from
the supplied factory.
"""
matrixstore = matrixstore_from_data_factory(factory)
with patched_global_matrixstore(matrixstore):
yield
|
|
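A sketch of how the helpers above are typically used in a test; DataFactory and the request below are placeholders, not names taken from the repository:

# Hypothetical usage inside a Django TestCase (sketch).
from matrixstore.tests.contextmanagers import (
    patched_global_matrixstore_from_data_factory,
)

def test_view_against_fake_data(self):
    factory = DataFactory()  # placeholder fake-data factory
    with patched_global_matrixstore_from_data_factory(factory):
        # Code under test now sees the patched global MatrixStore.
        response = self.client.get("/some/url/")
        self.assertEqual(response.status_code, 200)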
89b0a51e7443376794875d4ca014503e69ee847a
|
wsgi.py
|
wsgi.py
|
import os
from form import app as application
if 'OPENSHIFT_DATA_DIR' in os.environ:
datadir = os.environ['OPENSHIFT_DATA_DIR']
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + datadir + '/form.db'
|
Add WSGI entrypoint for OpenShift
|
Add WSGI entrypoint for OpenShift
|
Python
|
mit
|
torsava/pyconcz-form,torsava/pyconcz-form
|
Add WSGI entrypoint for OpenShift
|
import os
from form import app as application
if 'OPENSHIFT_DATA_DIR' in os.environ:
datadir = os.environ['OPENSHIFT_DATA_DIR']
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + datadir + '/form.db'
|
<commit_before><commit_msg>Add WSGI entrypoint for OpenShift<commit_after>
|
import os
from form import app as application
if 'OPENSHIFT_DATA_DIR' in os.environ:
datadir = os.environ['OPENSHIFT_DATA_DIR']
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + datadir + '/form.db'
|
Add WSGI entrypoint for OpenShiftimport os
from form import app as application
if 'OPENSHIFT_DATA_DIR' in os.environ:
datadir = os.environ['OPENSHIFT_DATA_DIR']
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + datadir + '/form.db'
|
<commit_before><commit_msg>Add WSGI entrypoint for OpenShift<commit_after>import os
from form import app as application
if 'OPENSHIFT_DATA_DIR' in os.environ:
datadir = os.environ['OPENSHIFT_DATA_DIR']
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + datadir + '/form.db'
|
|
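os.path.join would tolerate a trailing slash in OPENSHIFT_DATA_DIR; an equivalent sketch of the assignment above, placed inside the same if block:

# Equivalent URI construction (sketch).
application.config['SQLALCHEMY_DATABASE_URI'] = (
    'sqlite:///' + os.path.join(datadir, 'form.db'))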
2961c62d06f65e2aed15dac22fdd4d9a0f619668
|
util/dataset_times.py
|
util/dataset_times.py
|
#!/usr/bin/env python3
import datetime
import re
import sys
import time
timestamp_pattern = r'(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})'
message_pattern = r'(?P<message>.*)'
log_re = re.compile(r'^%s: \w+: %s\s*$' % (timestamp_pattern, message_pattern))
task_pattern = r'(?P<dataset>\w+), (?P<task>\d+)'
task_re = re.compile(r'^(Assigning|Slave \d+ report).*: %s\s*$' % task_pattern)
class Interval(object):
def __init__(self, first):
self.first = first
self.last = first
def update(self, last):
self.last = last
def seconds(self):
delta = parse_timestamp(self.last) - parse_timestamp(self.first)
return delta.total_seconds()
def parse_timestamp(timestamp):
"""Convert a timestamp string to a datetime object."""
sec, ms = timestamp.split(',')
microseconds = 1000 * int(ms)
fields = time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)
return datetime.datetime(*fields)
def parse_dataset(string):
mo = task_re.search(string)
if mo:
return mo.group('dataset')
def load():
intervals = []
dataset_map = {}
for line in sys.stdin:
mo = log_re.search(line)
if mo:
timestamp = mo.group('timestamp')
message = mo.group('message')
dataset = parse_dataset(message)
if dataset:
try:
dataset_map[dataset].update(timestamp)
except KeyError:
interval = Interval(timestamp)
dataset_map[dataset] = interval
intervals.append((dataset, interval))
else:
print('Malformed input:')
print('"%s"' % line)
return intervals
def timing():
for dataset, interval in load():
print(dataset, ' ', interval.seconds())
if __name__ == '__main__':
timing()
# vim: et sw=4 sts=4
|
Add a script to parse the logs and tabulate the amount of time per dataset
|
Add a script to parse the logs and tabulate the amount of time per dataset
|
Python
|
apache-2.0
|
kseppi/mrs-mapreduce,byu-aml-lab/mrs-mapreduce
|
Add a script to parse the logs and tabulate the amount of time per dataset
|
#!/usr/bin/env python3
import datetime
import re
import sys
import time
timestamp_pattern = r'(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})'
message_pattern = r'(?P<message>.*)'
log_re = re.compile(r'^%s: \w+: %s\s*$' % (timestamp_pattern, message_pattern))
task_pattern = r'(?P<dataset>\w+), (?P<task>\d+)'
task_re = re.compile(r'^(Assigning|Slave \d+ report).*: %s\s*$' % task_pattern)
class Interval(object):
def __init__(self, first):
self.first = first
self.last = first
def update(self, last):
self.last = last
def seconds(self):
delta = parse_timestamp(self.last) - parse_timestamp(self.first)
return delta.total_seconds()
def parse_timestamp(timestamp):
"""Convert a timestamp string to a datetime object."""
sec, ms = timestamp.split(',')
microseconds = 1000 * int(ms)
fields = time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)
return datetime.datetime(*fields)
def parse_dataset(string):
mo = task_re.search(string)
if mo:
return mo.group('dataset')
def load():
intervals = []
dataset_map = {}
for line in sys.stdin:
mo = log_re.search(line)
if mo:
timestamp = mo.group('timestamp')
message = mo.group('message')
dataset = parse_dataset(message)
if dataset:
try:
dataset_map[dataset].update(timestamp)
except KeyError:
interval = Interval(timestamp)
dataset_map[dataset] = interval
intervals.append((dataset, interval))
else:
print('Malformed input:')
print('"%s"' % line)
return intervals
def timing():
for dataset, interval in load():
print(dataset, ' ', interval.seconds())
if __name__ == '__main__':
timing()
# vim: et sw=4 sts=4
|
<commit_before><commit_msg>Add a script to parse the logs and tabulate the amount of time per dataset<commit_after>
|
#!/usr/bin/env python3
import datetime
import re
import sys
import time
timestamp_pattern = r'(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})'
message_pattern = r'(?P<message>.*)'
log_re = re.compile(r'^%s: \w+: %s\s*$' % (timestamp_pattern, message_pattern))
task_pattern = r'(?P<dataset>\w+), (?P<task>\d+)'
task_re = re.compile(r'^(Assigning|Slave \d+ report).*: %s\s*$' % task_pattern)
class Interval(object):
def __init__(self, first):
self.first = first
self.last = first
def update(self, last):
self.last = last
def seconds(self):
delta = parse_timestamp(self.last) - parse_timestamp(self.first)
return delta.total_seconds()
def parse_timestamp(timestamp):
"""Convert a timestamp string to a datetime object."""
sec, ms = timestamp.split(',')
microseconds = 1000 * int(ms)
fields = time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)
return datetime.datetime(*fields)
def parse_dataset(string):
mo = task_re.search(string)
if mo:
return mo.group('dataset')
def load():
intervals = []
dataset_map = {}
for line in sys.stdin:
mo = log_re.search(line)
if mo:
timestamp = mo.group('timestamp')
message = mo.group('message')
dataset = parse_dataset(message)
if dataset:
try:
dataset_map[dataset].update(timestamp)
except KeyError:
interval = Interval(timestamp)
dataset_map[dataset] = interval
intervals.append((dataset, interval))
else:
print('Malformed input:')
print('"%s"' % line)
return intervals
def timing():
for dataset, interval in load():
print(dataset, ' ', interval.seconds())
if __name__ == '__main__':
timing()
# vim: et sw=4 sts=4
|
Add a script to parse the logs and tabulate the amount of time per dataset#!/usr/bin/env python3
import datetime
import re
import sys
import time
timestamp_pattern = r'(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})'
message_pattern = r'(?P<message>.*)'
log_re = re.compile(r'^%s: \w+: %s\s*$' % (timestamp_pattern, message_pattern))
task_pattern = r'(?P<dataset>\w+), (?P<task>\d+)'
task_re = re.compile(r'^(Assigning|Slave \d+ report).*: %s\s*$' % task_pattern)
class Interval(object):
def __init__(self, first):
self.first = first
self.last = first
def update(self, last):
self.last = last
def seconds(self):
delta = parse_timestamp(self.last) - parse_timestamp(self.first)
return delta.total_seconds()
def parse_timestamp(timestamp):
"""Convert a timestamp string to a datetime object."""
sec, ms = timestamp.split(',')
microseconds = 1000 * int(ms)
fields = time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)
return datetime.datetime(*fields)
def parse_dataset(string):
mo = task_re.search(string)
if mo:
return mo.group('dataset')
def load():
intervals = []
dataset_map = {}
for line in sys.stdin:
mo = log_re.search(line)
if mo:
timestamp = mo.group('timestamp')
message = mo.group('message')
dataset = parse_dataset(message)
if dataset:
try:
dataset_map[dataset].update(timestamp)
except KeyError:
interval = Interval(timestamp)
dataset_map[dataset] = interval
intervals.append((dataset, interval))
else:
print('Malformed input:')
print('"%s"' % line)
return intervals
def timing():
for dataset, interval in load():
print(dataset, ' ', interval.seconds())
if __name__ == '__main__':
timing()
# vim: et sw=4 sts=4
|
<commit_before><commit_msg>Add a script to parse the logs and tabulate the amount of time per dataset<commit_after>#!/usr/bin/env python3
import datetime
import re
import sys
import time
timestamp_pattern = r'(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})'
message_pattern = r'(?P<message>.*)'
log_re = re.compile(r'^%s: \w+: %s\s*$' % (timestamp_pattern, message_pattern))
task_pattern = r'(?P<dataset>\w+), (?P<task>\d+)'
task_re = re.compile(r'^(Assigning|Slave \d+ report).*: %s\s*$' % task_pattern)
class Interval(object):
def __init__(self, first):
self.first = first
self.last = first
def update(self, last):
self.last = last
def seconds(self):
delta = parse_timestamp(self.last) - parse_timestamp(self.first)
return delta.total_seconds()
def parse_timestamp(timestamp):
"""Convert a timestamp string to a datetime object."""
sec, ms = timestamp.split(',')
microseconds = 1000 * int(ms)
fields = time.strptime(sec, '%Y-%m-%d %H:%M:%S')[0:6] + (microseconds,)
return datetime.datetime(*fields)
def parse_dataset(string):
mo = task_re.search(string)
if mo:
return mo.group('dataset')
def load():
intervals = []
dataset_map = {}
for line in sys.stdin:
mo = log_re.search(line)
if mo:
timestamp = mo.group('timestamp')
message = mo.group('message')
dataset = parse_dataset(message)
if dataset:
try:
dataset_map[dataset].update(timestamp)
except KeyError:
interval = Interval(timestamp)
dataset_map[dataset] = interval
intervals.append((dataset, interval))
else:
print('Malformed input:')
print('"%s"' % line)
return intervals
def timing():
for dataset, interval in load():
print(dataset, ' ', interval.seconds())
if __name__ == '__main__':
timing()
# vim: et sw=4 sts=4
|
|
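A quick self-contained check of the two regular expressions the script builds; the sample log line below is invented for illustration, so real log content will differ in its message text:
import re
sample = '2016-03-01 12:34:56,789: INFO: Assigning task to slave 3: mydataset, 7'
log_re = re.compile(r'^(?P<timestamp>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}): \w+: (?P<message>.*)\s*$')
task_re = re.compile(r'^(Assigning|Slave \d+ report).*: (?P<dataset>\w+), (?P<task>\d+)\s*$')
mo = log_re.search(sample)
assert mo is not None
assert task_re.search(mo.group('message')).group('dataset') == 'mydataset'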
801f7ebd01a814c3278a66ae4bc0afea2bbb70a6
|
tests/python/test_context/test_stack/test_sorted.py
|
tests/python/test_context/test_stack/test_sorted.py
|
# encoding: utf-8
import unittest
from logging_utils._compat import mock
from logging_utils.context.stack.sorted import SortedContextStack
class SimpleContextStackTest(unittest.TestCase):
def setUp(self):
self.inner = mock.MagicMock(spec=SortedContextStack)
self.inner.__str__.return_value = "foo"
self.inner.__iter__.return_value = [("b", 1), ("a", 2), ("a", 1)]
self.stack = SortedContextStack(self.inner)
def test_init_requires_one_arg(self):
self.assertRaises(TypeError, SortedContextStack)
def test_pop_calls_inner_stack(self):
self.stack.pop()
self.inner.pop.assert_called_once_with()
def test_push_calls_inner_stack(self):
self.stack.push(dict(a=1))
self.inner.push.assert_called_once_with(dict(a=1))
def test_str_calls_inner_stack_and_does_not_manipulate_output(self):
out = str(self.stack)
self.inner.__str__.assert_called_once_with()
self.assertEqual("foo", out)
def test_iter_calls_inner_iterable(self):
iter(self.stack)
self.inner.__iter__.assert_called_once_with()
def test_iter_returns_sorted_iterable(self):
items = iter(self.stack)
self.inner.__iter__.assert_called_once_with()
self.assertEquals(list(items), [("a", 2), ("a", 1), ("b", 1)])
|
Add tests for sorted stack
|
Add tests for sorted stack
|
Python
|
mit
|
michalbachowski/pylogging_utils,michalbachowski/pylogging_utils,michalbachowski/pylogging_utils
|
Add tests for sorted stack
|
# encoding: utf-8
import unittest
from logging_utils._compat import mock
from logging_utils.context.stack.sorted import SortedContextStack
class SimpleContextStackTest(unittest.TestCase):
def setUp(self):
self.inner = mock.MagicMock(spec=SortedContextStack)
self.inner.__str__.return_value = "foo"
self.inner.__iter__.return_value = [("b", 1), ("a", 2), ("a", 1)]
self.stack = SortedContextStack(self.inner)
def test_init_requires_one_arg(self):
self.assertRaises(TypeError, SortedContextStack)
def test_pop_calls_inner_stack(self):
self.stack.pop()
self.inner.pop.assert_called_once_with()
def test_push_calls_inner_stack(self):
self.stack.push(dict(a=1))
self.inner.push.assert_called_once_with(dict(a=1))
def test_str_calls_inner_stack_and_does_not_manipulate_output(self):
out = str(self.stack)
self.inner.__str__.assert_called_once_with()
self.assertEqual("foo", out)
def test_iter_calls_inner_iterable(self):
iter(self.stack)
self.inner.__iter__.assert_called_once_with()
def test_iter_returns_sorted_iterable(self):
items = iter(self.stack)
self.inner.__iter__.assert_called_once_with()
self.assertEquals(list(items), [("a", 2), ("a", 1), ("b", 1)])
|
<commit_before><commit_msg>Add tests for sorted stack<commit_after>
|
# encoding: utf-8
import unittest
from logging_utils._compat import mock
from logging_utils.context.stack.sorted import SortedContextStack
class SimpleContextStackTest(unittest.TestCase):
def setUp(self):
self.inner = mock.MagicMock(spec=SortedContextStack)
self.inner.__str__.return_value = "foo"
self.inner.__iter__.return_value = [("b", 1), ("a", 2), ("a", 1)]
self.stack = SortedContextStack(self.inner)
def test_init_requires_one_arg(self):
self.assertRaises(TypeError, SortedContextStack)
def test_pop_calls_inner_stack(self):
self.stack.pop()
self.inner.pop.assert_called_once_with()
def test_push_calls_inner_stack(self):
self.stack.push(dict(a=1))
self.inner.push.assert_called_once_with(dict(a=1))
def test_str_calls_inner_stack_and_does_not_manipulate_output(self):
out = str(self.stack)
self.inner.__str__.assert_called_once_with()
self.assertEqual("foo", out)
def test_iter_calls_inner_iterable(self):
iter(self.stack)
self.inner.__iter__.assert_called_once_with()
def test_iter_returns_sorted_iterable(self):
items = iter(self.stack)
self.inner.__iter__.assert_called_once_with()
self.assertEquals(list(items), [("a", 2), ("a", 1), ("b", 1)])
|
Add tests for sorted stack# encoding: utf-8
import unittest
from logging_utils._compat import mock
from logging_utils.context.stack.sorted import SortedContextStack
class SimpleContextStackTest(unittest.TestCase):
def setUp(self):
self.inner = mock.MagicMock(spec=SortedContextStack)
self.inner.__str__.return_value = "foo"
self.inner.__iter__.return_value = [("b", 1), ("a", 2), ("a", 1)]
self.stack = SortedContextStack(self.inner)
def test_init_requires_one_arg(self):
self.assertRaises(TypeError, SortedContextStack)
def test_pop_calls_inner_stack(self):
self.stack.pop()
self.inner.pop.assert_called_once_with()
def test_push_calls_inner_stack(self):
self.stack.push(dict(a=1))
self.inner.push.assert_called_once_with(dict(a=1))
def test_str_calls_inner_stack_and_does_not_manipulate_output(self):
out = str(self.stack)
self.inner.__str__.assert_called_once_with()
self.assertEqual("foo", out)
def test_iter_calls_inner_iterable(self):
iter(self.stack)
self.inner.__iter__.assert_called_once_with()
def test_iter_returns_sorted_iterable(self):
items = iter(self.stack)
self.inner.__iter__.assert_called_once_with()
self.assertEquals(list(items), [("a", 2), ("a", 1), ("b", 1)])
|
<commit_before><commit_msg>Add tests for sorted stack<commit_after># encoding: utf-8
import unittest
from logging_utils._compat import mock
from logging_utils.context.stack.sorted import SortedContextStack
class SimpleContextStackTest(unittest.TestCase):
def setUp(self):
self.inner = mock.MagicMock(spec=SortedContextStack)
self.inner.__str__.return_value = "foo"
self.inner.__iter__.return_value = [("b", 1), ("a", 2), ("a", 1)]
self.stack = SortedContextStack(self.inner)
def test_init_requires_one_arg(self):
self.assertRaises(TypeError, SortedContextStack)
def test_pop_calls_inner_stack(self):
self.stack.pop()
self.inner.pop.assert_called_once_with()
def test_push_calls_inner_stack(self):
self.stack.push(dict(a=1))
self.inner.push.assert_called_once_with(dict(a=1))
def test_str_calls_inner_stack_and_does_not_manipulate_output(self):
out = str(self.stack)
self.inner.__str__.assert_called_once_with()
self.assertEqual("foo", out)
def test_iter_calls_inner_iterable(self):
iter(self.stack)
self.inner.__iter__.assert_called_once_with()
def test_iter_returns_sorted_iterable(self):
items = iter(self.stack)
self.inner.__iter__.assert_called_once_with()
self.assertEquals(list(items), [("a", 2), ("a", 1), ("b", 1)])
|
|
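The SortedContextStack implementation itself is not part of this record; a minimal sketch consistent with the assertions above (an assumption for illustration, not the project's actual code) could look like:
class SortedContextStack(object):
    """Decorate an inner stack and yield its items sorted on iteration."""
    def __init__(self, inner):
        self._inner = inner
    def push(self, context):
        self._inner.push(context)
    def pop(self):
        return self._inner.pop()
    def __str__(self):
        return str(self._inner)
    def __iter__(self):
        # keys ascending, values descending, as test_iter_returns_sorted_iterable expects
        return iter(sorted(self._inner, key=lambda kv: (kv[0], -kv[1])))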
ab3e5d78f883561d45ac6074f27c97eefa4c1540
|
test_bst.py
|
test_bst.py
|
from bst import BinarySearchTree
nodes = [5, 4, 8, 3, 43, 22, 7, 74, 2]
def test_insert_nodes():
"""size of tree equal to number of values inserted"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_contains_node():
"""expected nodes in tree"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.contains(74)
assert b.contains(2)
assert b.contains(5)
assert b.contains(43)
assert not b.contains(103)
def test_balance():
"""balance updated when node added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.balance() == 0
b.insert(90)
assert b.balance() == -1
def test_depth():
"""depth updated as nodes added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.depth() == 4
b.insert(90)
assert b.depth() == 5
def test_size():
"""size on empty and when populated"""
b = BinarySearchTree()
assert b.size() == 0
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_root():
"""check tree's root and child nodes"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.root.val == 5
assert b.root.left.val == 4
assert b.root.right.val == 8
|
Add tests for binary search tree
|
Add tests for binary search tree
|
Python
|
mit
|
nbeck90/data_structures_2
|
Add tests for binary search tree
|
from bst import BinarySearchTree
nodes = [5, 4, 8, 3, 43, 22, 7, 74, 2]
def test_insert_nodes():
"""size of tree equal to number of values inserted"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_contains_node():
"""expected nodes in tree"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.contains(74)
assert b.contains(2)
assert b.contains(5)
assert b.contains(43)
assert not b.contains(103)
def test_balance():
"""balance updated when node added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.balance() == 0
b.insert(90)
assert b.balance() == -1
def test_depth():
"""depth updated as nodes added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.depth() == 4
b.insert(90)
assert b.depth() == 5
def test_size():
"""size on empty and when populated"""
b = BinarySearchTree()
assert b.size() == 0
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_root():
"""check tree's root and child nodes"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.root.val == 5
assert b.root.left.val == 4
assert b.root.right.val == 8
|
<commit_before><commit_msg>Add tests for binary search tree<commit_after>
|
from bst import BinarySearchTree
nodes = [5, 4, 8, 3, 43, 22, 7, 74, 2]
def test_insert_nodes():
"""size of tree equal to number of values inserted"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_contains_node():
"""expected nodes in tree"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.contains(74)
assert b.contains(2)
assert b.contains(5)
assert b.contains(43)
assert not b.contains(103)
def test_balance():
"""balance updated when node added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.balance() == 0
b.insert(90)
assert b.balance() == -1
def test_depth():
"""depth updated as nodes added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.depth() == 4
b.insert(90)
assert b.depth() == 5
def test_size():
"""size on empty and when populated"""
b = BinarySearchTree()
assert b.size() == 0
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_root():
"""check tree's root and child nodes"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.root.val == 5
assert b.root.left.val == 4
assert b.root.right.val == 8
|
Add tests for binary search treefrom bst import BinarySearchTree
nodes = [5, 4, 8, 3, 43, 22, 7, 74, 2]
def test_insert_nodes():
"""size of tree equal to number of values inserted"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_contains_node():
"""expected nodes in tree"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.contains(74)
assert b.contains(2)
assert b.contains(5)
assert b.contains(43)
assert not b.contains(103)
def test_balance():
"""balance updated when node added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.balance() == 0
b.insert(90)
assert b.balance() == -1
def test_depth():
"""depth updated as nodes added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.depth() == 4
b.insert(90)
assert b.depth() == 5
def test_size():
"""size on empty and when populated"""
b = BinarySearchTree()
assert b.size() == 0
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_root():
"""check tree's root and child nodes"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.root.val == 5
assert b.root.left.val == 4
assert b.root.right.val == 8
|
<commit_before><commit_msg>Add tests for binary search tree<commit_after>from bst import BinarySearchTree
nodes = [5, 4, 8, 3, 43, 22, 7, 74, 2]
def test_insert_nodes():
"""size of tree equal to number of values inserted"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_contains_node():
"""expected nodes in tree"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.contains(74)
assert b.contains(2)
assert b.contains(5)
assert b.contains(43)
assert not b.contains(103)
def test_balance():
"""balance updated when node added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.balance() == 0
b.insert(90)
assert b.balance() == -1
def test_depth():
"""depth updated as nodes added"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.depth() == 4
b.insert(90)
assert b.depth() == 5
def test_size():
"""size on empty and when populated"""
b = BinarySearchTree()
assert b.size() == 0
for n in nodes:
b.insert(n)
assert b.size() == 9
def test_root():
"""check tree's root and child nodes"""
b = BinarySearchTree()
for n in nodes:
b.insert(n)
assert b.root.val == 5
assert b.root.left.val == 4
assert b.root.right.val == 8
|
|
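The balance assertions imply a sign convention the commit message leaves unstated; presumably balance() is depth(left subtree) - depth(right subtree):
balance = 3 - 3 = 0    (the nine initial nodes give both subtrees of root 5 a depth of 3)
balance = 3 - 4 = -1   (inserting 90 deepens the right side along 8 -> 43 -> 74 -> 90)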
d0199235c363bd7c8965c25e69b615ec66f36e99
|
django_nyt/migrations/0008_auto_20161023_1641.py
|
django_nyt/migrations/0008_auto_20161023_1641.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 14:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0007_add_modified_and_default_settings'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='nyt_notifications', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='notificationtype',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nyt_settings', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
Add migration for new related_name and CASCADE constraints
|
Add migration for new related_name and CASCADE constraints
|
Python
|
apache-2.0
|
benjaoming/django-nyt,benjaoming/django-nyt
|
Add migration for new related_name and CASCADE constraints
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 14:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0007_add_modified_and_default_settings'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='nyt_notifications', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='notificationtype',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nyt_settings', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
<commit_before><commit_msg>Add migration for new related_name and CASCADE constraints<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 14:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0007_add_modified_and_default_settings'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='nyt_notifications', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='notificationtype',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nyt_settings', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
Add migration for new related_name and CASCADE constraints# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 14:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0007_add_modified_and_default_settings'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='nyt_notifications', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='notificationtype',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nyt_settings', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
<commit_before><commit_msg>Add migration for new related_name and CASCADE constraints<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 14:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('django_nyt', '0007_add_modified_and_default_settings'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='nyt_notifications', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AlterField(
model_name='notificationtype',
name='content_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='settings',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nyt_settings', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
|
cf5151aecf6552faec6232ef0ba57070b546a10d
|
app/handlers/test_base.py
|
app/handlers/test_base.py
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The base for the test related handlers."""
try:
import simplejson as json
except ImportError:
import json
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import utils.validator as validator
# pylint: disable=too-many-public-methods
class TestBaseHandler(hbase.BaseHandler):
"""Base class for test related API handlers.
This class provides methods and functions common to all the test API
endpoints.
"""
def __init__(self, application, request, **kwargs):
super(TestBaseHandler, self).__init__(application, request, **kwargs)
@staticmethod
def _token_validation_func():
return hcommon.valid_token_tests
def execute_put(self, *args, **kwargs):
"""Execute the PUT pre-operations."""
response = None
if self.validate_req_token("PUT"):
if kwargs and kwargs.get("id", None):
valid_request = self._valid_post_request()
if valid_request == 200:
try:
json_obj = json.loads(self.request.body.decode("utf8"))
valid_json, j_reason = validator.is_valid_json(
json_obj, self._valid_keys("PUT"))
if valid_json:
kwargs["json_obj"] = json_obj
kwargs["db_options"] = self.settings["dboptions"]
kwargs["reason"] = j_reason
response = self._put(*args, **kwargs)
else:
response = hresponse.HandlerResponse(400)
if j_reason:
response.reason = (
"Provided JSON is not valid: %s" %
j_reason)
else:
response.reason = "Provided JSON is not valid"
except ValueError, ex:
self.log.exception(ex)
error = "No JSON data found in the PUT request"
self.log.error(error)
response = hresponse.HandlerResponse(422)
response.reason = error
else:
response = hresponse.HandlerResponse(valid_request)
response.reason = (
"%s: %s" %
(
self._get_status_message(valid_request),
"Use %s as the content type" % self.content_type
)
)
else:
response = hresponse.HandlerResponse(400)
response.reason = "No ID specified"
else:
response = hresponse.HandlerResponse(403)
response.reason = hcommon.NOT_VALID_TOKEN
return response
|
Add base handler for test resources.
|
Add base handler for test resources.
|
Python
|
lgpl-2.1
|
kernelci/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend,kernelci/kernelci-backend
|
Add base handler for test resources.
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The base for the test related handlers."""
try:
import simplejson as json
except ImportError:
import json
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import utils.validator as validator
# pylint: disable=too-many-public-methods
class TestBaseHandler(hbase.BaseHandler):
"""Base class for test related API handlers.
This class provides methods and functions common to all the test API
endpoints.
"""
def __init__(self, application, request, **kwargs):
super(TestBaseHandler, self).__init__(application, request, **kwargs)
@staticmethod
def _token_validation_func():
return hcommon.valid_token_tests
def execute_put(self, *args, **kwargs):
"""Execute the PUT pre-operations."""
response = None
if self.validate_req_token("PUT"):
if kwargs and kwargs.get("id", None):
valid_request = self._valid_post_request()
if valid_request == 200:
try:
json_obj = json.loads(self.request.body.decode("utf8"))
valid_json, j_reason = validator.is_valid_json(
json_obj, self._valid_keys("PUT"))
if valid_json:
kwargs["json_obj"] = json_obj
kwargs["db_options"] = self.settings["dboptions"]
kwargs["reason"] = j_reason
response = self._put(*args, **kwargs)
else:
response = hresponse.HandlerResponse(400)
if j_reason:
response.reason = (
"Provided JSON is not valid: %s" %
j_reason)
else:
response.reason = "Provided JSON is not valid"
except ValueError, ex:
self.log.exception(ex)
error = "No JSON data found in the PUT request"
self.log.error(error)
response = hresponse.HandlerResponse(422)
response.reason = error
else:
response = hresponse.HandlerResponse(valid_request)
response.reason = (
"%s: %s" %
(
self._get_status_message(valid_request),
"Use %s as the content type" % self.content_type
)
)
else:
response = hresponse.HandlerResponse(400)
response.reason = "No ID specified"
else:
response = hresponse.HandlerResponse(403)
response.reason = hcommon.NOT_VALID_TOKEN
return response
|
<commit_before><commit_msg>Add base handler for test resources.<commit_after>
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The base for the test related handlers."""
try:
import simplejson as json
except ImportError:
import json
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import utils.validator as validator
# pylint: disable=too-many-public-methods
class TestBaseHandler(hbase.BaseHandler):
"""Base class for test related API handlers.
This class provides methods and functions common to all the test API
endpoints.
"""
def __init__(self, application, request, **kwargs):
super(TestBaseHandler, self).__init__(application, request, **kwargs)
@staticmethod
def _token_validation_func():
return hcommon.valid_token_tests
def execute_put(self, *args, **kwargs):
"""Execute the PUT pre-operations."""
response = None
if self.validate_req_token("PUT"):
if kwargs and kwargs.get("id", None):
valid_request = self._valid_post_request()
if valid_request == 200:
try:
json_obj = json.loads(self.request.body.decode("utf8"))
valid_json, j_reason = validator.is_valid_json(
json_obj, self._valid_keys("PUT"))
if valid_json:
kwargs["json_obj"] = json_obj
kwargs["db_options"] = self.settings["dboptions"]
kwargs["reason"] = j_reason
response = self._put(*args, **kwargs)
else:
response = hresponse.HandlerResponse(400)
if j_reason:
response.reason = (
"Provided JSON is not valid: %s" %
j_reason)
else:
response.reason = "Provided JSON is not valid"
except ValueError, ex:
self.log.exception(ex)
error = "No JSON data found in the PUT request"
self.log.error(error)
response = hresponse.HandlerResponse(422)
response.reason = error
else:
response = hresponse.HandlerResponse(valid_request)
response.reason = (
"%s: %s" %
(
self._get_status_message(valid_request),
"Use %s as the content type" % self.content_type
)
)
else:
response = hresponse.HandlerResponse(400)
response.reason = "No ID specified"
else:
response = hresponse.HandlerResponse(403)
response.reason = hcommon.NOT_VALID_TOKEN
return response
|
Add base handler for test resources.# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The base for the test related handlers."""
try:
import simplejson as json
except ImportError:
import json
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import utils.validator as validator
# pylint: disable=too-many-public-methods
class TestBaseHandler(hbase.BaseHandler):
"""Base class for test related API handlers.
This class provides methods and functions common to all the test API
endpoints.
"""
def __init__(self, application, request, **kwargs):
super(TestBaseHandler, self).__init__(application, request, **kwargs)
@staticmethod
def _token_validation_func():
return hcommon.valid_token_tests
def execute_put(self, *args, **kwargs):
"""Execute the PUT pre-operations."""
response = None
if self.validate_req_token("PUT"):
if kwargs and kwargs.get("id", None):
valid_request = self._valid_post_request()
if valid_request == 200:
try:
json_obj = json.loads(self.request.body.decode("utf8"))
valid_json, j_reason = validator.is_valid_json(
json_obj, self._valid_keys("PUT"))
if valid_json:
kwargs["json_obj"] = json_obj
kwargs["db_options"] = self.settings["dboptions"]
kwargs["reason"] = j_reason
response = self._put(*args, **kwargs)
else:
response = hresponse.HandlerResponse(400)
if j_reason:
response.reason = (
"Provided JSON is not valid: %s" %
j_reason)
else:
response.reason = "Provided JSON is not valid"
except ValueError, ex:
self.log.exception(ex)
error = "No JSON data found in the PUT request"
self.log.error(error)
response = hresponse.HandlerResponse(422)
response.reason = error
else:
response = hresponse.HandlerResponse(valid_request)
response.reason = (
"%s: %s" %
(
self._get_status_message(valid_request),
"Use %s as the content type" % self.content_type
)
)
else:
response = hresponse.HandlerResponse(400)
response.reason = "No ID specified"
else:
response = hresponse.HandlerResponse(403)
response.reason = hcommon.NOT_VALID_TOKEN
return response
|
<commit_before><commit_msg>Add base handler for test resources.<commit_after># This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The base for the test related handlers."""
try:
import simplejson as json
except ImportError:
import json
import handlers.base as hbase
import handlers.common as hcommon
import handlers.response as hresponse
import utils.validator as validator
# pylint: disable=too-many-public-methods
class TestBaseHandler(hbase.BaseHandler):
"""Base class for test related API handlers.
This class provides methods and functions common to all the test API
endpoints.
"""
def __init__(self, application, request, **kwargs):
super(TestBaseHandler, self).__init__(application, request, **kwargs)
@staticmethod
def _token_validation_func():
return hcommon.valid_token_tests
def execute_put(self, *args, **kwargs):
"""Execute the PUT pre-operations."""
response = None
if self.validate_req_token("PUT"):
if kwargs and kwargs.get("id", None):
valid_request = self._valid_post_request()
if valid_request == 200:
try:
json_obj = json.loads(self.request.body.decode("utf8"))
valid_json, j_reason = validator.is_valid_json(
json_obj, self._valid_keys("PUT"))
if valid_json:
kwargs["json_obj"] = json_obj
kwargs["db_options"] = self.settings["dboptions"]
kwargs["reason"] = j_reason
response = self._put(*args, **kwargs)
else:
response = hresponse.HandlerResponse(400)
if j_reason:
response.reason = (
"Provided JSON is not valid: %s" %
j_reason)
else:
response.reason = "Provided JSON is not valid"
except ValueError, ex:
self.log.exception(ex)
error = "No JSON data found in the PUT request"
self.log.error(error)
response = hresponse.HandlerResponse(422)
response.reason = error
else:
response = hresponse.HandlerResponse(valid_request)
response.reason = (
"%s: %s" %
(
self._get_status_message(valid_request),
"Use %s as the content type" % self.content_type
)
)
else:
response = hresponse.HandlerResponse(400)
response.reason = "No ID specified"
else:
response = hresponse.HandlerResponse(403)
response.reason = hcommon.NOT_VALID_TOKEN
return response
|
|
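A hypothetical subclass sketch (handler name and accepted keys are invented for illustration, not taken from the kernelci codebase) showing the two hooks a concrete handler would typically supply so that execute_put() has something to call:
class TestSuiteHandler(TestBaseHandler):
    @staticmethod
    def _valid_keys(method):
        # Keys accepted in the request body, per HTTP method (illustrative only).
        return {"PUT": ["name", "version"]}.get(method, [])
    def _put(self, *args, **kwargs):
        json_obj = kwargs["json_obj"]
        response = hresponse.HandlerResponse(200)
        response.reason = "Updated test suite '%s'" % json_obj.get("name")
        return response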
441e247207bf4a29dcb790d132145daa61ea2482
|
test/test_rc.py
|
test/test_rc.py
|
from mpi4py import rc
import mpiunittest as unittest
class TestRC(unittest.TestCase):
def testRC1(self):
rc(initialize = rc.initialize)
rc(threaded = rc.threaded)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
def testRC2(self):
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testRC3(self):
error = lambda: rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
if __name__ == '__main__':
unittest.main()
|
Add tests for mpi4py.rc function
|
test: Add tests for mpi4py.rc function
|
Python
|
bsd-2-clause
|
pressel/mpi4py,pressel/mpi4py,mpi4py/mpi4py,mpi4py/mpi4py,mpi4py/mpi4py,pressel/mpi4py,pressel/mpi4py
|
test: Add tests for mpi4py.rc function
|
from mpi4py import rc
import mpiunittest as unittest
class TestRC(unittest.TestCase):
def testRC1(self):
rc(initialize = rc.initialize)
rc(threaded = rc.threaded)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
def testRC2(self):
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testRC3(self):
error = lambda: rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>test: Add tests for mpi4py.rc function<commit_after>
|
from mpi4py import rc
import mpiunittest as unittest
class TestRC(unittest.TestCase):
def testRC1(self):
rc(initialize = rc.initialize)
rc(threaded = rc.threaded)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
def testRC2(self):
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testRC3(self):
error = lambda: rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
if __name__ == '__main__':
unittest.main()
|
test: Add tests for mpi4py.rc functionfrom mpi4py import rc
import mpiunittest as unittest
class TestRC(unittest.TestCase):
def testRC1(self):
rc(initialize = rc.initialize)
rc(threaded = rc.threaded)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
def testRC2(self):
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testRC3(self):
error = lambda: rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>test: Add tests for mpi4py.rc function<commit_after>from mpi4py import rc
import mpiunittest as unittest
class TestRC(unittest.TestCase):
def testRC1(self):
rc(initialize = rc.initialize)
rc(threaded = rc.threaded)
rc(thread_level = rc.thread_level)
rc(finalize = rc.finalize)
rc(fast_reduce = rc.fast_reduce)
rc(recv_mprobe = rc.recv_mprobe)
def testRC2(self):
kwargs = rc.__dict__.copy()
rc(**kwargs)
def testRC3(self):
error = lambda: rc(ABCXYZ=123456)
self.assertRaises(TypeError, error)
if __name__ == '__main__':
unittest.main()
|
|
7be04ca4024384542da6215e1cf592c727bb5138
|
examples/pairgrid_dotplot.py
|
examples/pairgrid_dotplot.py
|
"""
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
|
Add example script for making a multi-variable dot plot
|
Add example script for making a multi-variable dot plot
|
Python
|
bsd-3-clause
|
huongttlan/seaborn,tim777z/seaborn,muku42/seaborn,drewokane/seaborn,anntzer/seaborn,sauliusl/seaborn,ischwabacher/seaborn,jakevdp/seaborn,petebachant/seaborn,ashhher3/seaborn,sinhrks/seaborn,Lx37/seaborn,wrobstory/seaborn,kyleam/seaborn,lukauskas/seaborn,anntzer/seaborn,Guokr1991/seaborn,mwaskom/seaborn,mia1rab/seaborn,JWarmenhoven/seaborn,phobson/seaborn,olgabot/seaborn,dimarkov/seaborn,gef756/seaborn,mclevey/seaborn,nileracecrew/seaborn,oesteban/seaborn,phobson/seaborn,uhjish/seaborn,jat255/seaborn,lukauskas/seaborn,arokem/seaborn,arokem/seaborn,bsipocz/seaborn,dhimmel/seaborn,parantapa/seaborn,cwu2011/seaborn,mwaskom/seaborn,clarkfitzg/seaborn,lypzln/seaborn,q1ang/seaborn
|
Add example script for making a multi-variable dot plot
|
"""
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
|
<commit_before><commit_msg>Add example script for making a multi-variable dot plot<commit_after>
|
"""
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
|
Add example script for making a multi-variable dot plot"""
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
|
<commit_before><commit_msg>Add example script for making a multi-variable dot plot<commit_after>"""
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
|
|
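A caveat for anyone re-running this example on a newer stack: DataFrame.sort() was later removed from pandas, so the frame handed to PairGrid would presumably need to be sorted with:
crashes.sort_values("total", ascending=False)
(Recent seaborn releases also renamed the PairGrid size argument to height.)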
8b80327ff23fd3a6ad02ef54203148705ee7d92b
|
exp/clusterexp/FowlkesExp.py
|
exp/clusterexp/FowlkesExp.py
|
"""
Try to replicate the toy dataset in the paper by Fowlkes et al., "Spectral Grouping
Using the Nystrom Method" and compute the spectrum.
"""
import numpy
import scipy.sparse
import matplotlib.pyplot as plt
from apgl.graph import SparseGraph, GeneralVertexList, GraphUtils
from exp.sandbox.Nystrom import Nystrom
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
numVertices = 150
X = numpy.zeros((numVertices, 2))
#Create circle
radius = 5
noise = 0.3
angles = numpy.random.rand(100)*2*numpy.pi
X[0:100, 0] = radius*numpy.sin(angles)
X[0:100, 1] = radius*numpy.cos(angles)
X[0:100, :] += numpy.random.randn(100, 2)*noise
#Create blob
R = 1
centre = numpy.array([5-R, 0])
X[100:, :] = centre + numpy.random.randn(50, 2)*noise
plt.figure(0)
plt.scatter(X[0:100, 0], X[0:100, 1], c="r")
plt.scatter(X[100:, 0], X[100:, 1], c="b")
#Compute weight matrix
sigma = 0.2
W = numpy.zeros((numVertices, numVertices))
for i in range(numVertices):
for j in range(numVertices):
W[i, j] = numpy.exp(-(numpy.linalg.norm(X[i, :] - X[j, :])**2)/(2*sigma**2))
graph = SparseGraph(GeneralVertexList(numVertices))
graph.setWeightMatrix(W)
L = graph.normalisedLaplacianSym()
#L = GraphUtils.shiftLaplacian(scipy.sparse.csr_matrix(W)).todense()
n = 100
omega, Q = numpy.linalg.eigh(L)
omega2, Q2 = Nystrom.eigpsd(L, n)
print(omega)
print(omega2)
plt.figure(1)
plt.plot(numpy.arange(omega.shape[0]), omega)
plt.plot(numpy.arange(omega2.shape[0]), omega2)
plt.show()
|
Test the dataset from the Fowlkes paper
|
Test the dataset from the Fowlkes paper
|
Python
|
bsd-3-clause
|
charanpald/APGL
|
Test the dataset from the Fowlkes paper
|
"""
Try to replicate the toy dataset in the paper by Fowlkes et al., "Spectral Grouping
Using the Nystrom Method" and compute the spectrum.
"""
import numpy
import scipy.sparse
import matplotlib.pyplot as plt
from apgl.graph import SparseGraph, GeneralVertexList, GraphUtils
from exp.sandbox.Nystrom import Nystrom
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
numVertices = 150
X = numpy.zeros((numVertices, 2))
#Create circle
radius = 5
noise = 0.3
angles = numpy.random.rand(100)*2*numpy.pi
X[0:100, 0] = radius*numpy.sin(angles)
X[0:100, 1] = radius*numpy.cos(angles)
X[0:100, :] += numpy.random.randn(100, 2)*noise
#Create blob
R = 1
centre = numpy.array([5-R, 0])
X[100:, :] = centre + numpy.random.randn(50, 2)*noise
plt.figure(0)
plt.scatter(X[0:100, 0], X[0:100, 1], c="r")
plt.scatter(X[100:, 0], X[100:, 1], c="b")
#Compute weight matrix
sigma = 0.2
W = numpy.zeros((numVertices, numVertices))
for i in range(numVertices):
for j in range(numVertices):
W[i, j] = numpy.exp(-(numpy.linalg.norm(X[i, :] - X[j, :])**2)/(2*sigma**2))
graph = SparseGraph(GeneralVertexList(numVertices))
graph.setWeightMatrix(W)
L = graph.normalisedLaplacianSym()
#L = GraphUtils.shiftLaplacian(scipy.sparse.csr_matrix(W)).todense()
n = 100
omega, Q = numpy.linalg.eigh(L)
omega2, Q2 = Nystrom.eigpsd(L, n)
print(omega)
print(omega2)
plt.figure(1)
plt.plot(numpy.arange(omega.shape[0]), omega)
plt.plot(numpy.arange(omega2.shape[0]), omega2)
plt.show()
|
<commit_before><commit_msg>Test the dataset from the Fowlkes paper <commit_after>
|
"""
Try to replicate the toy dataset in the paper by Fowlkes et al., "Spectral Grouping
Using the Nystrom Method" and compute the spectrum.
"""
import numpy
import scipy.sparse
import matplotlib.pyplot as plt
from apgl.graph import SparseGraph, GeneralVertexList, GraphUtils
from exp.sandbox.Nystrom import Nystrom
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
numVertices = 150
X = numpy.zeros((numVertices, 2))
#Create circle
radius = 5
noise = 0.3
angles = numpy.random.rand(100)*2*numpy.pi
X[0:100, 0] = radius*numpy.sin(angles)
X[0:100, 1] = radius*numpy.cos(angles)
X[0:100, :] += numpy.random.randn(100, 2)*noise
#Create blob
R = 1
centre = numpy.array([5-R, 0])
X[100:, :] = centre + numpy.random.randn(50, 2)*noise
plt.figure(0)
plt.scatter(X[0:100, 0], X[0:100, 1], c="r")
plt.scatter(X[100:, 0], X[100:, 1], c="b")
#Compute weight matrix
sigma = 0.2
W = numpy.zeros((numVertices, numVertices))
for i in range(numVertices):
for j in range(numVertices):
W[i, j] = numpy.exp(-(numpy.linalg.norm(X[i, :] - X[j, :])**2)/(2*sigma**2))
graph = SparseGraph(GeneralVertexList(numVertices))
graph.setWeightMatrix(W)
L = graph.normalisedLaplacianSym()
#L = GraphUtils.shiftLaplacian(scipy.sparse.csr_matrix(W)).todense()
n = 100
omega, Q = numpy.linalg.eigh(L)
omega2, Q2 = Nystrom.eigpsd(L, n)
print(omega)
print(omega2)
plt.figure(1)
plt.plot(numpy.arange(omega.shape[0]), omega)
plt.plot(numpy.arange(omega2.shape[0]), omega2)
plt.show()
|
Test the dataset from the Fowlkes paper """
Try to replicate the toy dataset in the paper by Fowlkes et al., "Spectral Grouping
Using the Nystrom Method" and compute the spectrum.
"""
import numpy
import scipy.sparse
import matplotlib.pyplot as plt
from apgl.graph import SparseGraph, GeneralVertexList, GraphUtils
from exp.sandbox.Nystrom import Nystrom
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
numVertices = 150
X = numpy.zeros((numVertices, 2))
#Create circle
radius = 5
noise = 0.3
angles = numpy.random.rand(100)*2*numpy.pi
X[0:100, 0] = radius*numpy.sin(angles)
X[0:100, 1] = radius*numpy.cos(angles)
X[0:100, :] += numpy.random.randn(100, 2)*noise
#Create blob
R = 1
centre = numpy.array([5-R, 0])
X[100:, :] = centre + numpy.random.randn(50, 2)*noise
plt.figure(0)
plt.scatter(X[0:100, 0], X[0:100, 1], c="r")
plt.scatter(X[100:, 0], X[100:, 1], c="b")
#Compute weight matrix
sigma = 0.2
W = numpy.zeros((numVertices, numVertices))
for i in range(numVertices):
for j in range(numVertices):
W[i, j] = numpy.exp(-(numpy.linalg.norm(X[i, :] - X[j, :])**2)/(2*sigma**2))
graph = SparseGraph(GeneralVertexList(numVertices))
graph.setWeightMatrix(W)
L = graph.normalisedLaplacianSym()
#L = GraphUtils.shiftLaplacian(scipy.sparse.csr_matrix(W)).todense()
n = 100
omega, Q = numpy.linalg.eigh(L)
omega2, Q2 = Nystrom.eigpsd(L, n)
print(omega)
print(omega2)
plt.figure(1)
plt.plot(numpy.arange(omega.shape[0]), omega)
plt.plot(numpy.arange(omega2.shape[0]), omega2)
plt.show()
|
<commit_before><commit_msg>Test the dataset from the Fowlkes paper <commit_after>"""
Try to replicate the toy dataset in the paper by Fowlkes et al., "Spectral Grouping
Using the Nystrom Method" and compute the spectrum.
"""
import numpy
import scipy.sparse
import matplotlib.pyplot as plt
from apgl.graph import SparseGraph, GeneralVertexList, GraphUtils
from exp.sandbox.Nystrom import Nystrom
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
numVertices = 150
X = numpy.zeros((numVertices, 2))
#Create circle
radius = 5
noise = 0.3
angles = numpy.random.rand(100)*2*numpy.pi
X[0:100, 0] = radius*numpy.sin(angles)
X[0:100, 1] = radius*numpy.cos(angles)
X[0:100, :] += numpy.random.randn(100, 2)*noise
#Create blob
R = 1
centre = numpy.array([5-R, 0])
X[100:, :] = centre + numpy.random.randn(50, 2)*noise
plt.figure(0)
plt.scatter(X[0:100, 0], X[0:100, 1], c="r")
plt.scatter(X[100:, 0], X[100:, 1], c="b")
#Compute weight matrix
sigma = 0.2
W = numpy.zeros((numVertices, numVertices))
for i in range(numVertices):
for j in range(numVertices):
W[i, j] = numpy.exp(-(numpy.linalg.norm(X[i, :] - X[j, :])**2)/(2*sigma**2))
graph = SparseGraph(GeneralVertexList(numVertices))
graph.setWeightMatrix(W)
L = graph.normalisedLaplacianSym()
#L = GraphUtils.shiftLaplacian(scipy.sparse.csr_matrix(W)).todense()
n = 100
omega, Q = numpy.linalg.eigh(L)
omega2, Q2 = Nystrom.eigpsd(L, n)
print(omega)
print(omega2)
plt.figure(1)
plt.plot(numpy.arange(omega.shape[0]), omega)
plt.plot(numpy.arange(omega2.shape[0]), omega2)
plt.show()
|
|
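The nested Python loop that fills W is O(n^2) interpreter-level work; an equivalent vectorized form (illustrative, not part of the original commit; X and sigma are the arrays defined in the script above) is:
from scipy.spatial.distance import cdist
W = numpy.exp(-cdist(X, X, 'sqeuclidean') / (2 * sigma ** 2))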
20782297afa70ff138e20de4b06271793114a5e6
|
bin/FindRandomasciiProfilePresets.py
|
bin/FindRandomasciiProfilePresets.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script scans a .wpaProfile file looking for presets that start with
Randomascii. These are the custom presets shipped with UIforETW. When updating
the startup profile it is easy for these to get deleted, so a tool for auditing
changes is quite helpful.
'''
from __future__ import print_function
import re
import sys
if len(sys.argv) < 2:
print('Usage: %s profilename.wpaProfile' % sys.argv[0])
print('Prints a list of Randomascii presets in a .wpaProfile file.')
sys.exit(0)
count = 0
for line in open(sys.argv[1]).readlines():
match = re.match(r'.*<Preset Name="(Randomascii[^"]*)".*', line, flags=re.IGNORECASE)
if match:
print(' %s' % match.groups()[0])
count += 1
print('Found %d presets.' % count)
|
Add script for enumerating Randomascii presets
|
Add script for enumerating Randomascii presets
The startup10.wpaProfile file which ships with UIforETW sets up a
default view which I think is a better introduction to ETW profiling.
It needs updating occasionally, and sometimes some of the presets can
get lost. This script prints all of the Randomascii presets that I have
added so that I can more easily tell if any have been removed.
|
Python
|
apache-2.0
|
ariccio/UIforETW,google/UIforETW,google/UIforETW,google/UIforETW,ariccio/UIforETW,google/UIforETW,ariccio/UIforETW,ariccio/UIforETW
|
Add script for enumerating Randomascii presets
The startup10.wpaProfile file which ships with UIforETW sets up a
default view which I think is a better introduction to ETW profiling.
It needs updating occasionally, and sometimes some of the presets can
get lost. This script prints all of the Randomascii presets that I have
added so that I can more easily tell if any have been removed.
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script scans a .wpaProfile file looking for presets that start with
Randomascii. These are the custom presets shipped with UIforETW. When updating
the startup profile it is easy for these to get deleted, so a tool for auditing
changes is quite helpful.
'''
from __future__ import print_function
import re
import sys
if len(sys.argv) < 2:
print('Usage: %s profilename.wpaProfile' % sys.argv[0])
print('Prints a list of Randomascii presets in a .wpaProfile file.')
sys.exit(0)
count = 0
for line in open(sys.argv[1]).readlines():
match = re.match(r'.*<Preset Name="(Randomascii[^"]*)".*', line, flags=re.IGNORECASE)
if match:
print(' %s' % match.groups()[0])
count += 1
print('Found %d presets.' % count)
|
<commit_before><commit_msg>Add script for enumerating Randomascii presets
The startup10.wpaProfile file which ships with UIforETW sets up a
default view which I think is a better introduction to ETW profiling.
It needs updating occasionally, and sometimes some of the presets can
get lost. This script prints all of the Randomascii presets that I have
added so that I can more easily tell if any have been removed.<commit_after>
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script scans a .wpaProfile file looking for presets that start with
Randomascii. These are the custom presets shipped with UIforETW. When updating
the startup profile it is easy for these to get deleted, so a tool for auditing
changes is quite helpful.
'''
from __future__ import print_function
import re
import sys
if len(sys.argv) < 2:
print('Usage: %s profilename.wpaProfile' % sys.argv[0])
print('Prints a list of Randomascii presets in a .wpaProfile file.')
sys.exit(0)
count = 0
for line in open(sys.argv[1]).readlines():
match = re.match(r'.*<Preset Name="(Randomascii[^"]*)".*', line, flags=re.IGNORECASE)
if match:
print(' %s' % match.groups()[0])
count += 1
print('Found %d presets.' % count)
|
Add script for enumerating Randomascii presets
The startup10.wpaProfile file which ships with UIforETW sets up a
default view which I think is a better introduction to ETW profiling.
It needs updating occasionally, and sometimes some of the presets can
get lost. This script prints all of the Randomascii presets that I have
added so that I can more easily tell if any have been removed.# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script scans a .wpaProfile file looking for presets that start with
Randomascii. These are the custom presets shipped with UIforETW. When updating
the startup profile it is easy for these to get deleted, so a tool for auditing
changes is quite helpful.
'''
from __future__ import print_function
import re
import sys
if len(sys.argv) < 2:
print('Usage: %s profilename.wpaProfile' % sys.argv[0])
print('Prints a list of Randomascii presets in a .wpaProfile file.')
sys.exit(0)
count = 0
for line in open(sys.argv[1]).readlines():
match = re.match(r'.*<Preset Name="(Randomascii[^"]*)".*', line, flags=re.IGNORECASE)
if match:
print(' %s' % match.groups()[0])
count += 1
print('Found %d presets.' % count)
|
<commit_before><commit_msg>Add script for enumerating Randomascii presets
The startup10.wpaProfile file which ships with UIforETW sets up a
default view which I think is a better introduction to ETW profiling.
It needs updating occasionally, and sometimes some of the presets can
get lost. This script prints all of the Randomascii presets that I have
added so that I can more easily tell if any have been removed.<commit_after># Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script scans a .wpaProfile file looking for presets that start with
Randomascii. These are the custom presets shipped with UIforETW. When updating
the startup profile it is easy for these to get deleted, so a tool for auditing
changes is quite helpful.
'''
from __future__ import print_function
import re
import sys
if len(sys.argv) < 2:
print('Usage: %s profilename.wpaProfile' % sys.argv[0])
print('Prints a list of Randomascii presets in a .wpaProfile file.')
sys.exit(0)
count = 0
for line in open(sys.argv[1]).readlines():
match = re.match(r'.*<Preset Name="(Randomascii[^"]*)".*', line, flags=re.IGNORECASE)
if match:
print(' %s' % match.groups()[0])
count += 1
print('Found %d presets.' % count)
|
|
946b72c0dec3880943859226e9626f32cb64a855
|
project/category/helpers.py
|
project/category/helpers.py
|
import re
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
|
Create helper module with slugify function
|
Create helper module with slugify function
|
Python
|
mit
|
dylanshine/streamschool,dylanshine/streamschool
|
Create helper module with slugify function
|
import re
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
|
<commit_before><commit_msg>Create helper module with slugify function<commit_after>
|
import re
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
|
Create helper module with slugify functionimport re
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
|
<commit_before><commit_msg>Create helper module with slugify function<commit_after>import re
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
|
|
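A quick aside on the slugify() helper above: it assumes Python 2 string semantics and the third-party translitcodec package, which registers the 'translit/long' codec used in the loop. A rough usage sketch under those assumptions (inputs and expected outputs are illustrative, not taken from the repository):
# -*- coding: utf-8 -*-
# Illustrative usage of the slugify() definition shown above.
# Assumes Python 2 plus the translitcodec package ('translit/long' codec).
import translitcodec  # noqa: F401 -- importing registers the codec
print(slugify(u'Hello, World!'))  # expected: u'hello-world'
print(slugify(u'Füße & Hände'))   # expected: something like u'fuesse-haende'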
596aceae0b76d919bc85cafdbb3128bae647ea9f
|
config_diag/tests/test_policy.py
|
config_diag/tests/test_policy.py
|
import os
import numpy as np
from ..policy import MDPDialogBuilder
class TestMDPDialogBuilder(object):
def setup(self):
tests_dir = os.path.abspath(os.path.dirname(__file__))
csv_file = os.path.join(tests_dir, "Titanic.csv")
self.config_sample = np.genfromtxt(csv_file, skip_header=1,
dtype=np.dtype(str),
delimiter=",")
def _test_builder(self, mdp_algorithm):
builder = MDPDialogBuilder(config_sample=self.config_sample,
mdp_algorithm=mdp_algorithm)
dialog = builder.build_dialog()
def test_value_iteration_builder(self):
self._test_builder("value-iteration")
def test_policy_iteration_builder(self):
self._test_builder("policy-iteration")
|
Add a test case for MDPDialogBuilder
|
Add a test case for MDPDialogBuilder
|
Python
|
apache-2.0
|
yasserglez/configurator,yasserglez/configurator
|
Add a test case for MDPDialogBuilder
|
import os
import numpy as np
from ..policy import MDPDialogBuilder
class TestMDPDialogBuilder(object):
def setup(self):
tests_dir = os.path.abspath(os.path.dirname(__file__))
csv_file = os.path.join(tests_dir, "Titanic.csv")
self.config_sample = np.genfromtxt(csv_file, skip_header=1,
dtype=np.dtype(str),
delimiter=",")
def _test_builder(self, mdp_algorithm):
builder = MDPDialogBuilder(config_sample=self.config_sample,
mdp_algorithm=mdp_algorithm)
dialog = builder.build_dialog()
def test_value_iteration_builder(self):
self._test_builder("value-iteration")
def test_policy_iteration_builder(self):
self._test_builder("policy-iteration")
|
<commit_before><commit_msg>Add a test case for MDPDialogBuilder<commit_after>
|
import os
import numpy as np
from ..policy import MDPDialogBuilder
class TestMDPDialogBuilder(object):
def setup(self):
tests_dir = os.path.abspath(os.path.dirname(__file__))
csv_file = os.path.join(tests_dir, "Titanic.csv")
self.config_sample = np.genfromtxt(csv_file, skip_header=1,
dtype=np.dtype(str),
delimiter=",")
def _test_builder(self, mdp_algorithm):
builder = MDPDialogBuilder(config_sample=self.config_sample,
mdp_algorithm=mdp_algorithm)
dialog = builder.build_dialog()
def test_value_iteration_builder(self):
self._test_builder("value-iteration")
def test_policy_iteration_builder(self):
self._test_builder("policy-iteration")
|
Add a test case for MDPDialogBuilderimport os
import numpy as np
from ..policy import MDPDialogBuilder
class TestMDPDialogBuilder(object):
def setup(self):
tests_dir = os.path.abspath(os.path.dirname(__file__))
csv_file = os.path.join(tests_dir, "Titanic.csv")
self.config_sample = np.genfromtxt(csv_file, skip_header=1,
dtype=np.dtype(str),
delimiter=",")
def _test_builder(self, mdp_algorithm):
builder = MDPDialogBuilder(config_sample=self.config_sample,
mdp_algorithm=mdp_algorithm)
dialog = builder.build_dialog()
def test_value_iteration_builder(self):
self._test_builder("value-iteration")
def test_policy_iteration_builder(self):
self._test_builder("policy-iteration")
|
<commit_before><commit_msg>Add a test case for MDPDialogBuilder<commit_after>import os
import numpy as np
from ..policy import MDPDialogBuilder
class TestMDPDialogBuilder(object):
def setup(self):
tests_dir = os.path.abspath(os.path.dirname(__file__))
csv_file = os.path.join(tests_dir, "Titanic.csv")
self.config_sample = np.genfromtxt(csv_file, skip_header=1,
dtype=np.dtype(str),
delimiter=",")
def _test_builder(self, mdp_algorithm):
builder = MDPDialogBuilder(config_sample=self.config_sample,
mdp_algorithm=mdp_algorithm)
dialog = builder.build_dialog()
def test_value_iteration_builder(self):
self._test_builder("value-iteration")
def test_policy_iteration_builder(self):
self._test_builder("policy-iteration")
|
|
86abb9a55433e09ae5686bcbf9456ca891f41867
|
senlin/tests/tempest/api/nodes/test_node_action.py
|
senlin/tests/tempest/api/nodes/test_node_action.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestNodeAction(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestNodeAction, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create test node
cls.node = cls.create_test_node(cls.profile['id'])
@classmethod
def resource_cleanup(cls):
# Delete node
cls.delete_test_node(cls.node['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestNodeAction, cls).resource_cleanup()
@decorators.idempotent_id('ae124bfe-9fcf-4e87-91b7-319102efbdcc')
def test_node_action_trigger(self):
params = {
'check': {
}
}
# Trigger node action
res = self.client.trigger_action('nodes', self.node['id'],
params=params)
        # Verify resp code, body and location in headers
self.assertEqual(202, res['status'])
self.assertIn('actions', res['location'])
action_id = res['location'].split('/actions/')[1]
self.wait_for_status('actions', action_id, 'SUCCEEDED')
|
Add API tests for node action
|
Add API tests for node action
Add API tests for triggering node action
blueprint tempest-plugin-interface
Change-Id: Iaafdd3190dba9cd5173d676b2010ad3c6f9215a5
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin
|
Add API tests for node action
Add API tests for triggering node action
blueprint tempest-plugin-interface
Change-Id: Iaafdd3190dba9cd5173d676b2010ad3c6f9215a5
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestNodeAction(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestNodeAction, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create test node
cls.node = cls.create_test_node(cls.profile['id'])
@classmethod
def resource_cleanup(cls):
# Delete node
cls.delete_test_node(cls.node['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestNodeAction, cls).resource_cleanup()
@decorators.idempotent_id('ae124bfe-9fcf-4e87-91b7-319102efbdcc')
def test_node_action_trigger(self):
params = {
'check': {
}
}
# Trigger node action
res = self.client.trigger_action('nodes', self.node['id'],
params=params)
        # Verify resp code, body and location in headers
self.assertEqual(202, res['status'])
self.assertIn('actions', res['location'])
action_id = res['location'].split('/actions/')[1]
self.wait_for_status('actions', action_id, 'SUCCEEDED')
|
<commit_before><commit_msg>Add API tests for node action
Add API tests for triggering node action
blueprint tempest-plugin-interface
Change-Id: Iaafdd3190dba9cd5173d676b2010ad3c6f9215a5<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestNodeAction(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestNodeAction, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create test node
cls.node = cls.create_test_node(cls.profile['id'])
@classmethod
def resource_cleanup(cls):
# Delete node
cls.delete_test_node(cls.node['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestNodeAction, cls).resource_cleanup()
@decorators.idempotent_id('ae124bfe-9fcf-4e87-91b7-319102efbdcc')
def test_node_action_trigger(self):
params = {
'check': {
}
}
# Trigger node action
res = self.client.trigger_action('nodes', self.node['id'],
params=params)
        # Verify resp code, body and location in headers
self.assertEqual(202, res['status'])
self.assertIn('actions', res['location'])
action_id = res['location'].split('/actions/')[1]
self.wait_for_status('actions', action_id, 'SUCCEEDED')
|
Add API tests for node action
Add API tests for triggering node action
blueprint tempest-plugin-interface
Change-Id: Iaafdd3190dba9cd5173d676b2010ad3c6f9215a5# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestNodeAction(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestNodeAction, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create test node
cls.node = cls.create_test_node(cls.profile['id'])
@classmethod
def resource_cleanup(cls):
# Delete node
cls.delete_test_node(cls.node['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestNodeAction, cls).resource_cleanup()
@decorators.idempotent_id('ae124bfe-9fcf-4e87-91b7-319102efbdcc')
def test_node_action_trigger(self):
params = {
'check': {
}
}
# Trigger node action
res = self.client.trigger_action('nodes', self.node['id'],
params=params)
        # Verify resp code, body and location in headers
self.assertEqual(202, res['status'])
self.assertIn('actions', res['location'])
action_id = res['location'].split('/actions/')[1]
self.wait_for_status('actions', action_id, 'SUCCEEDED')
|
<commit_before><commit_msg>Add API tests for node action
Add API tests for triggering node action
blueprint tempest-plugin-interface
Change-Id: Iaafdd3190dba9cd5173d676b2010ad3c6f9215a5<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestNodeAction(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestNodeAction, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create test node
cls.node = cls.create_test_node(cls.profile['id'])
@classmethod
def resource_cleanup(cls):
# Delete node
cls.delete_test_node(cls.node['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestNodeAction, cls).resource_cleanup()
@decorators.idempotent_id('ae124bfe-9fcf-4e87-91b7-319102efbdcc')
def test_node_action_trigger(self):
params = {
'check': {
}
}
# Trigger node action
res = self.client.trigger_action('nodes', self.node['id'],
params=params)
        # Verify resp code, body and location in headers
self.assertEqual(202, res['status'])
self.assertIn('actions', res['location'])
action_id = res['location'].split('/actions/')[1]
self.wait_for_status('actions', action_id, 'SUCCEEDED')
|
|
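An aside on the node-action test above: it leans on a wait_for_status() helper inherited from BaseSenlinTest, whose implementation is not included in this record. A minimal sketch of what such a status poller typically looks like follows; the helper name comes from the test, but the body, the client call and the timeouts are assumptions for illustration, not the actual senlin tempest plugin code.
import time

def wait_for_status(client, obj_type, obj_id, expected, timeout=120, interval=5):
    """Poll a REST resource until it reaches the expected status or time out.

    Illustrative sketch only: the real BaseSenlinTest helper and its client
    API may differ in names and return shapes.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        res = client.get_obj(obj_type, obj_id)  # hypothetical client call
        if res['body']['status'] == expected:
            return res
        time.sleep(interval)
    raise RuntimeError('%s %s did not reach status %s within %s seconds'
                       % (obj_type, obj_id, expected, timeout))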
9cc9685ea435db06ba915225b9c27c028b2e7cf5
|
editorconfig/versiontools.py
|
editorconfig/versiontools.py
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = list(map(int, split_version[:3])) + split_version[3:]
return tuple(split_version)
|
Convert map iterator to list (for Python3 support)
|
Convert map iterator to list (for Python3 support)
|
Python
|
bsd-2-clause
|
benjifisher/editorconfig-vim,pocke/editorconfig-vim,pocke/editorconfig-vim,johnfraney/editorconfig-vim,VictorBjelkholm/editorconfig-vim,VictorBjelkholm/editorconfig-vim,VictorBjelkholm/editorconfig-vim,benjifisher/editorconfig-vim,benjifisher/editorconfig-vim,johnfraney/editorconfig-vim,pocke/editorconfig-vim,johnfraney/editorconfig-vim
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
Convert map iterator to list (for Python3 support)
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = list(map(int, split_version[:3])) + split_version[3:]
return tuple(split_version)
|
<commit_before>"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
<commit_msg>Convert map iterator to list (for Python3 support)<commit_after>
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = list(map(int, split_version[:3])) + split_version[3:]
return tuple(split_version)
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
Convert map iterator to list (for Python3 support)"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = list(map(int, split_version[:3])) + split_version[3:]
return tuple(split_version)
|
<commit_before>"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
<commit_msg>Convert map iterator to list (for Python3 support)<commit_after>"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = list(map(int, split_version[:3])) + split_version[3:]
return tuple(split_version)
|
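An aside on why the one-line change in this record is needed: map() returns a lazy iterator in Python 3, so adding it to a list with + raises a TypeError; wrapping the call in list() restores the Python 2 behaviour. A short, self-contained illustration (runnable under Python 3, with made-up version parts):
parts = ['0', '9', '4', 'final']

# Under Python 3 this fails: 'map' objects do not support +.
try:
    version = map(int, parts[:3]) + parts[3:]
except TypeError as exc:
    print('old code fails on Python 3:', exc)

# The fix from the commit: materialise the iterator before concatenating.
version = list(map(int, parts[:3])) + parts[3:]
print(tuple(version))  # -> (0, 9, 4, 'final')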
e4869489d6187f7e023bf0b8ce6da80f717e6504
|
tests/lib/test_geo.py
|
tests/lib/test_geo.py
|
# -*- coding: utf-8 -*-
import pytest
from skylines.lib.geo import geographic_distance
from skylines.model.geo import Location
@pytest.mark.parametrize(
"loc1,loc2,expected",
[
(
Location(latitude=0.0, longitude=0.0),
Location(latitude=0.0, longitude=0.0),
0.0,
),
(
Location(latitude=38.898556, longitude=-77.037852),
Location(latitude=38.897147, longitude=-77.043934),
548.812,
),
],
)
def test_geographic_distance(loc1, loc2, expected):
result = geographic_distance(loc1, loc2)
assert result == pytest.approx(expected)
|
Add basic tests for `geographic_distance()`
|
lib/geo: Add basic tests for `geographic_distance()`
|
Python
|
agpl-3.0
|
skylines-project/skylines,skylines-project/skylines,skylines-project/skylines,skylines-project/skylines
|
lib/geo: Add basic tests for `geographic_distance()`
|
# -*- coding: utf-8 -*-
import pytest
from skylines.lib.geo import geographic_distance
from skylines.model.geo import Location
@pytest.mark.parametrize(
"loc1,loc2,expected",
[
(
Location(latitude=0.0, longitude=0.0),
Location(latitude=0.0, longitude=0.0),
0.0,
),
(
Location(latitude=38.898556, longitude=-77.037852),
Location(latitude=38.897147, longitude=-77.043934),
548.812,
),
],
)
def test_geographic_distance(loc1, loc2, expected):
result = geographic_distance(loc1, loc2)
assert result == pytest.approx(expected)
|
<commit_before><commit_msg>lib/geo: Add basic tests for `geographic_distance()`<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
from skylines.lib.geo import geographic_distance
from skylines.model.geo import Location
@pytest.mark.parametrize(
"loc1,loc2,expected",
[
(
Location(latitude=0.0, longitude=0.0),
Location(latitude=0.0, longitude=0.0),
0.0,
),
(
Location(latitude=38.898556, longitude=-77.037852),
Location(latitude=38.897147, longitude=-77.043934),
548.812,
),
],
)
def test_geographic_distance(loc1, loc2, expected):
result = geographic_distance(loc1, loc2)
assert result == pytest.approx(expected)
|
lib/geo: Add basic tests for `geographic_distance()`# -*- coding: utf-8 -*-
import pytest
from skylines.lib.geo import geographic_distance
from skylines.model.geo import Location
@pytest.mark.parametrize(
"loc1,loc2,expected",
[
(
Location(latitude=0.0, longitude=0.0),
Location(latitude=0.0, longitude=0.0),
0.0,
),
(
Location(latitude=38.898556, longitude=-77.037852),
Location(latitude=38.897147, longitude=-77.043934),
548.812,
),
],
)
def test_geographic_distance(loc1, loc2, expected):
result = geographic_distance(loc1, loc2)
assert result == pytest.approx(expected)
|
<commit_before><commit_msg>lib/geo: Add basic tests for `geographic_distance()`<commit_after># -*- coding: utf-8 -*-
import pytest
from skylines.lib.geo import geographic_distance
from skylines.model.geo import Location
@pytest.mark.parametrize(
"loc1,loc2,expected",
[
(
Location(latitude=0.0, longitude=0.0),
Location(latitude=0.0, longitude=0.0),
0.0,
),
(
Location(latitude=38.898556, longitude=-77.037852),
Location(latitude=38.897147, longitude=-77.043934),
548.812,
),
],
)
def test_geographic_distance(loc1, loc2, expected):
result = geographic_distance(loc1, loc2)
assert result == pytest.approx(expected)
|
|
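A note on the geographic_distance test above: pytest.approx() compares with a default relative tolerance of 1e-6 (plus a tiny absolute floor), so the expected 548.812 m distance has to be reproduced to well under a millimetre; a looser bound can be requested explicitly. A small illustration, where the measured value is made up for the example:
import pytest

measured = 548.81200021  # hypothetical return value of geographic_distance()

assert measured == pytest.approx(548.812)             # default rel=1e-6
assert measured == pytest.approx(548.812, abs=0.001)  # explicit 1 mm absolute tolerance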
3641035bd97c415e1e64fc9e8020e1468bbad227
|
camkes/internal/mkdirp.py
|
camkes/internal/mkdirp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''
mkdir -p
For some reason, there does not seem to be a simpler way of achieving thread-
safe directory creation in Python.
'''
import errno, os
def mkdirp(path):
assert not os.path.exists(path) or os.path.isdir(path)
try:
os.makedirs(path)
except OSError as e:
# Mask any errors that result from the directory existing.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
|
Add a function for thread-safe directory creation.
|
Add a function for thread-safe directory creation.
This is to be used in an upcoming commit.
|
Python
|
bsd-2-clause
|
smaccm/camkes-tool,smaccm/camkes-tool,smaccm/camkes-tool,smaccm/camkes-tool
|
Add a function for thread-safe directory creation.
This is to be used in an upcoming commit.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''
mkdir -p
For some reason, there does not seem to be a simpler way of achieving thread-
safe directory creation in Python.
'''
import errno, os
def mkdirp(path):
assert not os.path.exists(path) or os.path.isdir(path)
try:
os.makedirs(path)
except OSError as e:
# Mask any errors that result from the directory existing.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
|
<commit_before><commit_msg>Add a function for thread-safe directory creation.
This is to be used in an upcoming commit.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''
mkdir -p
For some reason, there does not seem to be a simpler way of achieving thread-
safe directory creation in Python.
'''
import errno, os
def mkdirp(path):
assert not os.path.exists(path) or os.path.isdir(path)
try:
os.makedirs(path)
except OSError as e:
# Mask any errors that result from the directory existing.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
|
Add a function for thread-safe directory creation.
This is to be used in an upcoming commit.#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''
mkdir -p
For some reason, there does not seem to be a simpler way of achieving thread-
safe directory creation in Python.
'''
import errno, os
def mkdirp(path):
assert not os.path.exists(path) or os.path.isdir(path)
try:
os.makedirs(path)
except OSError as e:
# Mask any errors that result from the directory existing.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
|
<commit_before><commit_msg>Add a function for thread-safe directory creation.
This is to be used in an upcoming commit.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''
mkdir -p
For some reason, there does not seem to be a simpler way of achieving thread-
safe directory creation in Python.
'''
import errno, os
def mkdirp(path):
assert not os.path.exists(path) or os.path.isdir(path)
try:
os.makedirs(path)
except OSError as e:
# Mask any errors that result from the directory existing.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
|
|
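An aside on the mkdirp() helper above: the try/except-EEXIST dance is the classic Python 2 idiom for race-free directory creation. On Python 3.2 and later the standard library handles the same case directly, so a codebase that has dropped Python 2 could shrink the helper roughly as follows (a sketch under that assumption, not the camkes code):
import os

def mkdirp(path):
    # exist_ok=True (Python >= 3.2) makes this a no-op when the directory
    # already exists, but still raises FileExistsError if the path is a file.
    os.makedirs(path, exist_ok=True)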
44de8b2d6b4b58b9ba8165ba52ee3c6852d97d38
|
tests/test_profile.py
|
tests/test_profile.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest2 as unittest
import os
from kobo.client import BaseClientCommandContainer
from kobo.cli import CommandOptionParser
class TestCommandContainer(BaseClientCommandContainer):
pass
class TestConf(unittest.TestCase):
def setUp(self):
self.command_container = TestCommandContainer()
def test_profile_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container)
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "")
self.assertEqual(option, None)
def test_profile_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "default-profile")
self.assertEqual(option.get_opt_string(), "--profile")
self.assertEqual(option.help, "specify profile (default: default-profile)")
def test_configuration_directory_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
# CommandOptionParser() doesn't store the configuration_file path in an instance variable, instead it's
        # built in _load_profile() with the line below:
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc")
self.assertEqual(configuration_file, "/etc/default-profile.conf")
def test_configuration_directory_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile",
configuration_directory="/etc/client")
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc/client")
self.assertEqual(configuration_file, "/etc/client/default-profile.conf")
|
Add preliminary tests for profile support and configuration directory support
|
Add preliminary tests for profile support and configuration directory
support
New tests need to be written to see how CommandOptionParser sets up
its CLI interface and where it'll look for configuration files when
setting default_profile and configuration_directory at instantiation.
|
Python
|
lgpl-2.1
|
release-engineering/kobo,release-engineering/kobo,release-engineering/kobo,release-engineering/kobo
|
Add preliminary tests for profile support and configuration directory
support
New tests need to be written to see how CommandOptionParser sets up
its CLI interface and where it'll look for configuration files when
setting default_profile and configuration_directory at instantiation.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest2 as unittest
import os
from kobo.client import BaseClientCommandContainer
from kobo.cli import CommandOptionParser
class TestCommandContainer(BaseClientCommandContainer):
pass
class TestConf(unittest.TestCase):
def setUp(self):
self.command_container = TestCommandContainer()
def test_profile_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container)
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "")
self.assertEqual(option, None)
def test_profile_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "default-profile")
self.assertEqual(option.get_opt_string(), "--profile")
self.assertEqual(option.help, "specify profile (default: default-profile)")
def test_configuration_directory_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
# CommandOptionParser() doesn't store the configuration_file path in an instance variable, instead it's
        # built in _load_profile() with the line below:
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc")
self.assertEqual(configuration_file, "/etc/default-profile.conf")
def test_configuration_directory_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile",
configuration_directory="/etc/client")
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc/client")
self.assertEqual(configuration_file, "/etc/client/default-profile.conf")
|
<commit_before><commit_msg>Add preliminary tests for profile support and configuration directory
support
New tests need to be written to see how CommandOptionParser sets up
its CLI interface and where it'll look for configuration files when
setting default_profile and configuration_directory at instantiation.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest2 as unittest
import os
from kobo.client import BaseClientCommandContainer
from kobo.cli import CommandOptionParser
class TestCommandContainer(BaseClientCommandContainer):
pass
class TestConf(unittest.TestCase):
def setUp(self):
self.command_container = TestCommandContainer()
def test_profile_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container)
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "")
self.assertEqual(option, None)
def test_profile_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "default-profile")
self.assertEqual(option.get_opt_string(), "--profile")
self.assertEqual(option.help, "specify profile (default: default-profile)")
def test_configuration_directory_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
# CommandOptionParser() doesn't store the configuration_file path in an instance variable, instead it's
        # built in _load_profile() with the line below:
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc")
self.assertEqual(configuration_file, "/etc/default-profile.conf")
def test_configuration_directory_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile",
configuration_directory="/etc/client")
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc/client")
self.assertEqual(configuration_file, "/etc/client/default-profile.conf")
|
Add preliminary tests for profile support and configuration directory
support
New tests need to be written to see how CommandOptionParser sets up
its CLI interface and where it'll look for configuration files when
setting default_profile and configuration_directory at instantiation.#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest2 as unittest
import os
from kobo.client import BaseClientCommandContainer
from kobo.cli import CommandOptionParser
class TestCommandContainer(BaseClientCommandContainer):
pass
class TestConf(unittest.TestCase):
def setUp(self):
self.command_container = TestCommandContainer()
def test_profile_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container)
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "")
self.assertEqual(option, None)
def test_profile_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "default-profile")
self.assertEqual(option.get_opt_string(), "--profile")
self.assertEqual(option.help, "specify profile (default: default-profile)")
def test_configuration_directory_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
# CommandOptionParser() doesn't store the configuration_file path in an instance variable, instead it's
        # built in _load_profile() with the line below:
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc")
self.assertEqual(configuration_file, "/etc/default-profile.conf")
def test_configuration_directory_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile",
configuration_directory="/etc/client")
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc/client")
self.assertEqual(configuration_file, "/etc/client/default-profile.conf")
|
<commit_before><commit_msg>Add preliminary tests for profile support and configuration directory
support
New tests need to be written to see how CommandOptionParser sets up
its CLI interface and where it'll look for configuration files when
setting default_profile and configuration_directory at instantiation.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest2 as unittest
import os
from kobo.client import BaseClientCommandContainer
from kobo.cli import CommandOptionParser
class TestCommandContainer(BaseClientCommandContainer):
pass
class TestConf(unittest.TestCase):
def setUp(self):
self.command_container = TestCommandContainer()
def test_profile_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container)
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "")
self.assertEqual(option, None)
def test_profile_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
option = parser.get_option("--profile")
self.assertEqual(parser.default_profile, "default-profile")
self.assertEqual(option.get_opt_string(), "--profile")
self.assertEqual(option.help, "specify profile (default: default-profile)")
def test_configuration_directory_option_unset(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile")
# CommandOptionParser() doesn't store the configuration_file path in an instance variable, instead it's
        # built in _load_profile() with the line below:
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc")
self.assertEqual(configuration_file, "/etc/default-profile.conf")
def test_configuration_directory_option_set(self):
parser = CommandOptionParser(command_container=self.command_container, default_profile="default-profile",
configuration_directory="/etc/client")
configuration_file = os.path.join(parser.configuration_directory, '{0}.conf'.format(parser.default_profile))
self.assertEqual(parser.configuration_directory, "/etc/client")
self.assertEqual(configuration_file, "/etc/client/default-profile.conf")
|
|
c88ae40c88701a4ff065e5529d5dfe2d15174a83
|
mapit/management/commands/mapit_UK_add_ons_to_gss.py
|
mapit/management/commands/mapit_UK_add_ons_to_gss.py
|
# This script is for a one off import of all the old ONS codes to a MapIt
# containing only the new ones from a modern Boundary-Line.
import csv
from django.core.management.base import NoArgsCommand
from mapit.models import Area, CodeType
from psycopg2 import IntegrityError
def process(new_code, old_code):
try:
area = Area.objects.get(codes__code=new_code, codes__type__code='gss')
except Area.DoesNotExist:
# An area that existed at the time of the mapping, but no longer
return
# Check if already has the right code
if 'ons' in area.all_codes and area.all_codes['ons'] == old_code:
return
try:
area.codes.create(type=CodeType.objects.get(code='ons'), code=old_code)
except IntegrityError:
raise Exception, "Key already exists for %s, can't give it %s" % (area, old_code)
class Command(NoArgsCommand):
help = 'Inserts the old ONS codes into mapit'
def handle_noargs(self, **options):
mapping = csv.reader(open('../data/UK/BL-2010-10-code-change.csv'))
mapping.next()
for row in mapping:
new_code, name, old_code = row[0], row[1], row[3]
process(new_code, old_code)
mapping = csv.reader(open('../data/UK/BL-2010-10-missing-codes.csv'))
mapping.next()
for row in mapping:
type, new_code, old_code, name = row
process(new_code, old_code)
|
Add script to add old ONS codes to a database containing only the new GSS ones.
|
Add script to add old ONS codes to a database containing only the new GSS ones.
|
Python
|
agpl-3.0
|
chris48s/mapit,opencorato/mapit,opencorato/mapit,chris48s/mapit,New-Bamboo/mapit,Sinar/mapit,Code4SA/mapit,Sinar/mapit,opencorato/mapit,Code4SA/mapit,Code4SA/mapit,chris48s/mapit,New-Bamboo/mapit
|
Add script to add old ONS codes to a database containing only the new GSS ones.
|
# This script is for a one off import of all the old ONS codes to a MapIt
# containing only the new ones from a modern Boundary-Line.
import csv
from django.core.management.base import NoArgsCommand
from mapit.models import Area, CodeType
from psycopg2 import IntegrityError
def process(new_code, old_code):
try:
area = Area.objects.get(codes__code=new_code, codes__type__code='gss')
except Area.DoesNotExist:
# An area that existed at the time of the mapping, but no longer
return
# Check if already has the right code
if 'ons' in area.all_codes and area.all_codes['ons'] == old_code:
return
try:
area.codes.create(type=CodeType.objects.get(code='ons'), code=old_code)
except IntegrityError:
raise Exception, "Key already exists for %s, can't give it %s" % (area, old_code)
class Command(NoArgsCommand):
help = 'Inserts the old ONS codes into mapit'
def handle_noargs(self, **options):
mapping = csv.reader(open('../data/UK/BL-2010-10-code-change.csv'))
mapping.next()
for row in mapping:
new_code, name, old_code = row[0], row[1], row[3]
process(new_code, old_code)
mapping = csv.reader(open('../data/UK/BL-2010-10-missing-codes.csv'))
mapping.next()
for row in mapping:
type, new_code, old_code, name = row
process(new_code, old_code)
|
<commit_before><commit_msg>Add script to add old ONS codes to a database containing only the new GSS ones.<commit_after>
|
# This script is for a one off import of all the old ONS codes to a MapIt
# containing only the new ones from a modern Boundary-Line.
import csv
from django.core.management.base import NoArgsCommand
from mapit.models import Area, CodeType
from psycopg2 import IntegrityError
def process(new_code, old_code):
try:
area = Area.objects.get(codes__code=new_code, codes__type__code='gss')
except Area.DoesNotExist:
# An area that existed at the time of the mapping, but no longer
return
# Check if already has the right code
if 'ons' in area.all_codes and area.all_codes['ons'] == old_code:
return
try:
area.codes.create(type=CodeType.objects.get(code='ons'), code=old_code)
except IntegrityError:
raise Exception, "Key already exists for %s, can't give it %s" % (area, old_code)
class Command(NoArgsCommand):
help = 'Inserts the old ONS codes into mapit'
def handle_noargs(self, **options):
mapping = csv.reader(open('../data/UK/BL-2010-10-code-change.csv'))
mapping.next()
for row in mapping:
new_code, name, old_code = row[0], row[1], row[3]
process(new_code, old_code)
mapping = csv.reader(open('../data/UK/BL-2010-10-missing-codes.csv'))
mapping.next()
for row in mapping:
type, new_code, old_code, name = row
process(new_code, old_code)
|
Add script to add old ONS codes to a database containing only the new GSS ones.# This script is for a one off import of all the old ONS codes to a MapIt
# containing only the new ones from a modern Boundary-Line.
import csv
from django.core.management.base import NoArgsCommand
from mapit.models import Area, CodeType
from psycopg2 import IntegrityError
def process(new_code, old_code):
try:
area = Area.objects.get(codes__code=new_code, codes__type__code='gss')
except Area.DoesNotExist:
# An area that existed at the time of the mapping, but no longer
return
# Check if already has the right code
if 'ons' in area.all_codes and area.all_codes['ons'] == old_code:
return
try:
area.codes.create(type=CodeType.objects.get(code='ons'), code=old_code)
except IntegrityError:
raise Exception, "Key already exists for %s, can't give it %s" % (area, old_code)
class Command(NoArgsCommand):
help = 'Inserts the old ONS codes into mapit'
def handle_noargs(self, **options):
mapping = csv.reader(open('../data/UK/BL-2010-10-code-change.csv'))
mapping.next()
for row in mapping:
new_code, name, old_code = row[0], row[1], row[3]
process(new_code, old_code)
mapping = csv.reader(open('../data/UK/BL-2010-10-missing-codes.csv'))
mapping.next()
for row in mapping:
type, new_code, old_code, name = row
process(new_code, old_code)
|
<commit_before><commit_msg>Add script to add old ONS codes to a database containing only the new GSS ones.<commit_after># This script is for a one off import of all the old ONS codes to a MapIt
# containing only the new ones from a modern Boundary-Line.
import csv
from django.core.management.base import NoArgsCommand
from mapit.models import Area, CodeType
from psycopg2 import IntegrityError
def process(new_code, old_code):
try:
area = Area.objects.get(codes__code=new_code, codes__type__code='gss')
except Area.DoesNotExist:
# An area that existed at the time of the mapping, but no longer
return
# Check if already has the right code
if 'ons' in area.all_codes and area.all_codes['ons'] == old_code:
return
try:
area.codes.create(type=CodeType.objects.get(code='ons'), code=old_code)
except IntegrityError:
raise Exception, "Key already exists for %s, can't give it %s" % (area, old_code)
class Command(NoArgsCommand):
help = 'Inserts the old ONS codes into mapit'
def handle_noargs(self, **options):
mapping = csv.reader(open('../data/UK/BL-2010-10-code-change.csv'))
mapping.next()
for row in mapping:
new_code, name, old_code = row[0], row[1], row[3]
process(new_code, old_code)
mapping = csv.reader(open('../data/UK/BL-2010-10-missing-codes.csv'))
mapping.next()
for row in mapping:
type, new_code, old_code, name = row
process(new_code, old_code)
|
|
1f4525ed0affb0eed7b3091aa775db0f2efa7b1e
|
mininet/test/test_ptyleak.py
|
mininet/test/test_ptyleak.py
|
#!/usr/bin/env python
"""
Regression test for pty leak in Node()
"""
import unittest
from mininet.net import Mininet
from mininet.clean import cleanup
from mininet.topo import SingleSwitchTopo
class TestPtyLeak( unittest.TestCase ):
"Verify that there is no pty leakage"
@staticmethod
def testPtyLeak():
"Test for pty leakage"
net = Mininet( SingleSwitchTopo() )
net.start()
host = net[ 'h1' ]
for _ in range( 0, 10 ):
oldptys = host.slave, host.master
net.delHost( host )
host = net.addHost( 'h1' )
assert ( host.slave, host.master ) == oldptys
net.stop()
if __name__ == '__main__':
unittest.main()
cleanup()
|
Test for Node()/Host() pty leakage
|
Test for Node()/Host() pty leakage
|
Python
|
bsd-3-clause
|
mininet/mininet,mininet/mininet,mininet/mininet
|
Test for Node()/Host() pty leakage
|
#!/usr/bin/env python
"""
Regression test for pty leak in Node()
"""
import unittest
from mininet.net import Mininet
from mininet.clean import cleanup
from mininet.topo import SingleSwitchTopo
class TestPtyLeak( unittest.TestCase ):
"Verify that there is no pty leakage"
@staticmethod
def testPtyLeak():
"Test for pty leakage"
net = Mininet( SingleSwitchTopo() )
net.start()
host = net[ 'h1' ]
for _ in range( 0, 10 ):
oldptys = host.slave, host.master
net.delHost( host )
host = net.addHost( 'h1' )
assert ( host.slave, host.master ) == oldptys
net.stop()
if __name__ == '__main__':
unittest.main()
cleanup()
|
<commit_before><commit_msg>Test for Node()/Host() pty leakage<commit_after>
|
#!/usr/bin/env python
"""
Regression test for pty leak in Node()
"""
import unittest
from mininet.net import Mininet
from mininet.clean import cleanup
from mininet.topo import SingleSwitchTopo
class TestPtyLeak( unittest.TestCase ):
"Verify that there is no pty leakage"
@staticmethod
def testPtyLeak():
"Test for pty leakage"
net = Mininet( SingleSwitchTopo() )
net.start()
host = net[ 'h1' ]
for _ in range( 0, 10 ):
oldptys = host.slave, host.master
net.delHost( host )
host = net.addHost( 'h1' )
assert ( host.slave, host.master ) == oldptys
net.stop()
if __name__ == '__main__':
unittest.main()
cleanup()
|
Test for Node()/Host() pty leakage#!/usr/bin/env python
"""
Regression test for pty leak in Node()
"""
import unittest
from mininet.net import Mininet
from mininet.clean import cleanup
from mininet.topo import SingleSwitchTopo
class TestPtyLeak( unittest.TestCase ):
"Verify that there is no pty leakage"
@staticmethod
def testPtyLeak():
"Test for pty leakage"
net = Mininet( SingleSwitchTopo() )
net.start()
host = net[ 'h1' ]
for _ in range( 0, 10 ):
oldptys = host.slave, host.master
net.delHost( host )
host = net.addHost( 'h1' )
assert ( host.slave, host.master ) == oldptys
net.stop()
if __name__ == '__main__':
unittest.main()
cleanup()
|
<commit_before><commit_msg>Test for Node()/Host() pty leakage<commit_after>#!/usr/bin/env python
"""
Regression test for pty leak in Node()
"""
import unittest
from mininet.net import Mininet
from mininet.clean import cleanup
from mininet.topo import SingleSwitchTopo
class TestPtyLeak( unittest.TestCase ):
"Verify that there is no pty leakage"
@staticmethod
def testPtyLeak():
"Test for pty leakage"
net = Mininet( SingleSwitchTopo() )
net.start()
host = net[ 'h1' ]
for _ in range( 0, 10 ):
oldptys = host.slave, host.master
net.delHost( host )
host = net.addHost( 'h1' )
assert ( host.slave, host.master ) == oldptys
net.stop()
if __name__ == '__main__':
unittest.main()
cleanup()
|
|
5d3da483d0d12bb16203261ce9e923c9d0a053e7
|
MAIN/solids_ISO_GUI.py
|
MAIN/solids_ISO_GUI.py
|
"""
PROGRAM SOLIDS
--------------
Computes the displacement solution for a finite element assembly
of 2D solids under point loads using as input easy-to-create
text files containing element, nodal, materials and loads data.
Fortran subroutines mesher.for and contour.for are also available to
write the required input files out of a Gmesh (.msh) generated file
and to convert the results file into Gmesh post-processor files.
Created by Juan Gomez and Nicolas Guarin-Zapata as part of the course:
IC0283 COMPUTATIONAL MODELLING
Universidad EAFIT
Departamento de Ingenieria Civil
Last updated January 2016
"""
import numpy as np
import preprocesor as pre
import postprocesor as pos
import assemutil as ass
from datetime import datetime
import matplotlib.pyplot as plt
import easygui
folder = easygui.diropenbox(title="Folder for the job") + "/"
name = easygui.enterbox("Enter the job name")
echo = easygui.buttonbox("Do you want to echo files?",
choices=["Yes", "No"])
start_time = datetime.now()
#%%
# MODEL ASSEMBLY
#
# Reads the model
nodes, mats, elements, loads = pre.readin(folder=folder)
if echo == "Yes":
pre.echomod(nodes, mats, elements, loads, folder=folder)
# Retrieves problem parameters
ne, nn, nm, nl, COORD = pre.proini(nodes, mats, elements, loads)
# Counts equations and creates BCs array IBC
neq, IBC = ass.eqcounter(nn, nodes)
# Computes assembly operator
DME, IELCON = ass.DME(IBC, ne, elements)
# Assembles Global Stiffness Matrix KG
KG = ass.matassem(IBC, mats, elements, nn, ne, neq, COORD, DME, IELCON)
# Assembles Global Right Hand Side Vector RHSG
RHSG = ass.loadasem(loads, IBC, neq, nl)
#%%
# SYSTEM SOLUTION
#
# Solves the system
UG = np.linalg.solve(KG, RHSG)
print(np.allclose(np.dot(KG, UG), RHSG))
end_time = datetime.now()
print('Duration for system solution: {}'.format(end_time - start_time))
#%%
# POST-PROCESSING
#
start_time = datetime.now()
pos.plot_disp2(IBC, UG, nodes, elements)
# Scatter displacements over the elements
UU = pos.scatter(DME, UG, ne, neq, elements)
# Generates points inside the elements and computes strain solution
E_nodes = pos.strain_nodes(IELCON, UU, ne, COORD, elements)
pos.plot_strain2(E_nodes, nodes, elements)
end_time = datetime.now()
print('Duration for post processing: {}'.format(end_time - start_time))
print('Program terminated successfully!')
plt.show()
|
Add a simple GUI input
|
Add a simple GUI input
|
Python
|
mit
|
jgomezc1/FEM_PYTHON,AppliedMechanics-EAFIT/SolidsPy
|
Add a simple GUI input
|
"""
PROGRAM SOLIDS
--------------
Computes the displacement solution for a finite element assembly
of 2D solids under point loads using as input easy-to-create
text files containing element, nodal, materials and loads data.
Fortran subroutines mesher.for and contour.for are also available to
write the required input files out of a Gmesh (.msh) generated file
and to convert the results file into Gmesh post-processor files.
Created by Juan Gomez and Nicolas Guarin-Zapata as part of the course:
IC0283 COMPUTATIONAL MODELLING
Universidad EAFIT
Departamento de Ingenieria Civil
Last updated January 2016
"""
import numpy as np
import preprocesor as pre
import postprocesor as pos
import assemutil as ass
from datetime import datetime
import matplotlib.pyplot as plt
import easygui
folder = easygui.diropenbox(title="Folder for the job") + "/"
name = easygui.enterbox("Enter the job name")
echo = easygui.buttonbox("Do you want to echo files?",
choices=["Yes", "No"])
start_time = datetime.now()
#%%
# MODEL ASSEMBLY
#
# Reads the model
nodes, mats, elements, loads = pre.readin(folder=folder)
if echo == "Yes":
pre.echomod(nodes, mats, elements, loads, folder=folder)
# Retrieves problem parameters
ne, nn, nm, nl, COORD = pre.proini(nodes, mats, elements, loads)
# Counts equations and creates BCs array IBC
neq, IBC = ass.eqcounter(nn, nodes)
# Computes assembly operator
DME, IELCON = ass.DME(IBC, ne, elements)
# Assembles Global Stiffness Matrix KG
KG = ass.matassem(IBC, mats, elements, nn, ne, neq, COORD, DME, IELCON)
# Assembles Global Right Hand Side Vector RHSG
RHSG = ass.loadasem(loads, IBC, neq, nl)
#%%
# SYSTEM SOLUTION
#
# Solves the system
UG = np.linalg.solve(KG, RHSG)
print(np.allclose(np.dot(KG, UG), RHSG))
end_time = datetime.now()
print('Duration for system solution: {}'.format(end_time - start_time))
#%%
# POST-PROCESSING
#
start_time = datetime.now()
pos.plot_disp2(IBC, UG, nodes, elements)
# Scatter displacements over the elements
UU = pos.scatter(DME, UG, ne, neq, elements)
# Generates points inside the elements and computes strain solution
E_nodes = pos.strain_nodes(IELCON, UU, ne, COORD, elements)
pos.plot_strain2(E_nodes, nodes, elements)
end_time = datetime.now()
print('Duration for post processing: {}'.format(end_time - start_time))
print('Program terminated successfully!')
plt.show()
|
<commit_before><commit_msg>Add a simple GUI input<commit_after>
|
"""
PROGRAM SOLIDS
--------------
Computes the displacement solution for a finite element assembly
of 2D solids under point loads using as input easy-to-create
text files containing element, nodal, materials and loads data.
Fortran subroutines mesher.for and contour.for are also available to
write the required input files out of a Gmesh (.msh) generated file
and to convert the results file into Gmesh post-processor files.
Created by Juan Gomez and Nicolas Guarin-Zapata as part of the course:
IC0283 COMPUTATIONAL MODELLING
Universidad EAFIT
Departamento de Ingenieria Civil
Last updated January 2016
"""
import numpy as np
import preprocesor as pre
import postprocesor as pos
import assemutil as ass
from datetime import datetime
import matplotlib.pyplot as plt
import easygui
folder = easygui.diropenbox(title="Folder for the job") + "/"
name = easygui.enterbox("Enter the job name")
echo = easygui.buttonbox("Do you want to echo files?",
choices=["Yes", "No"])
start_time = datetime.now()
#%%
# MODEL ASSEMBLY
#
# Reads the model
nodes, mats, elements, loads = pre.readin(folder=folder)
if echo == "Yes":
pre.echomod(nodes, mats, elements, loads, folder=folder)
# Retrieves problem parameters
ne, nn, nm, nl, COORD = pre.proini(nodes, mats, elements, loads)
# Counts equations and creates BCs array IBC
neq, IBC = ass.eqcounter(nn, nodes)
# Computes assembly operator
DME, IELCON = ass.DME(IBC, ne, elements)
# Assembles Global Stiffness Matrix KG
KG = ass.matassem(IBC, mats, elements, nn, ne, neq, COORD, DME, IELCON)
# Assembles Global Right Hand Side Vector RHSG
RHSG = ass.loadasem(loads, IBC, neq, nl)
#%%
# SYSTEM SOLUTION
#
# Solves the system
UG = np.linalg.solve(KG, RHSG)
print(np.allclose(np.dot(KG, UG), RHSG))
end_time = datetime.now()
print('Duration for system solution: {}'.format(end_time - start_time))
#%%
# POST-PROCESSING
#
start_time = datetime.now()
pos.plot_disp2(IBC, UG, nodes, elements)
# Scatter displacements over the elements
UU = pos.scatter(DME, UG, ne, neq, elements)
# Generates points inside the elements and computes strain solution
E_nodes = pos.strain_nodes(IELCON, UU, ne, COORD, elements)
pos.plot_strain2(E_nodes, nodes, elements)
end_time = datetime.now()
print('Duration for post processing: {}'.format(end_time - start_time))
print('Program terminated successfully!')
plt.show()
|
Add a simple GUI input"""
PROGRAM SOLIDS
--------------
Computes the displacement solution for a finite element assembly
of 2D solids under point loads using as input easy-to-create
text files containing element, nodal, materials and loads data.
Fortran subroutines mesher.for and contour.for are also available to
write the required input files out of a Gmesh (.msh) generated file
and to convert the results file into Gmesh post-processor files.
Created by Juan Gomez and Nicolas Guarin-Zapata as part of the course:
IC0283 COMPUTATIONAL MODELLING
Universidad EAFIT
Departamento de Ingenieria Civil
Last updated January 2016
"""
import numpy as np
import preprocesor as pre
import postprocesor as pos
import assemutil as ass
from datetime import datetime
import matplotlib.pyplot as plt
import easygui
folder = easygui.diropenbox(title="Folder for the job") + "/"
name = easygui.enterbox("Enter the job name")
echo = easygui.buttonbox("Do you want to echo files?",
choices=["Yes", "No"])
start_time = datetime.now()
#%%
# MODEL ASSEMBLY
#
# Reads the model
nodes, mats, elements, loads = pre.readin(folder=folder)
if echo == "Yes":
pre.echomod(nodes, mats, elements, loads, folder=folder)
# Retrieves problem parameters
ne, nn, nm, nl, COORD = pre.proini(nodes, mats, elements, loads)
# Counts equations and creates BCs array IBC
neq, IBC = ass.eqcounter(nn, nodes)
# Computes assembly operator
DME, IELCON = ass.DME(IBC, ne, elements)
# Assembles Global Stiffness Matrix KG
KG = ass.matassem(IBC, mats, elements, nn, ne, neq, COORD, DME, IELCON)
# Assembles Global Right Hand Side Vector RHSG
RHSG = ass.loadasem(loads, IBC, neq, nl)
#%%
# SYSTEM SOLUTION
#
# Solves the system
UG = np.linalg.solve(KG, RHSG)
print(np.allclose(np.dot(KG, UG), RHSG))
end_time = datetime.now()
print('Duration for system solution: {}'.format(end_time - start_time))
#%%
# POST-PROCESSING
#
start_time = datetime.now()
pos.plot_disp2(IBC, UG, nodes, elements)
# Scatter displacements over the elements
UU = pos.scatter(DME, UG, ne, neq, elements)
# Generates points inside the elements and computes strain solution
E_nodes = pos.strain_nodes(IELCON, UU, ne, COORD, elements)
pos.plot_strain2(E_nodes, nodes, elements)
end_time = datetime.now()
print('Duration for post processing: {}'.format(end_time - start_time))
print('Program terminated successfully!')
plt.show()
|
<commit_before><commit_msg>Add a simple GUI input<commit_after>"""
PROGRAM SOLIDS
--------------
Computes the displacement solution for a finite element assembly
of 2D solids under point loads using as input easy-to-create
text files containing element, nodal, materials and loads data.
Fortran subroutines mesher.for and contour.for are also available to
write the required input files out of a Gmesh (.msh) generated file
and to convert the results file into Gmesh post-processor files.
Created by Juan Gomez and Nicolas Guarin-Zapata as part of the course:
IC0283 COMPUTATIONAL MODELLING
Universidad EAFIT
Departamento de Ingenieria Civil
Last updated January 2016
"""
import numpy as np
import preprocesor as pre
import postprocesor as pos
import assemutil as ass
from datetime import datetime
import matplotlib.pyplot as plt
import easygui
folder = easygui.diropenbox(title="Folder for the job") + "/"
name = easygui.enterbox("Enter the job name")
echo = easygui.buttonbox("Do you want to echo files?",
choices=["Yes", "No"])
start_time = datetime.now()
#%%
# MODEL ASSEMBLY
#
# Reads the model
nodes, mats, elements, loads = pre.readin(folder=folder)
if echo == "Yes":
pre.echomod(nodes, mats, elements, loads, folder=folder)
# Retrieves problem parameters
ne, nn, nm, nl, COORD = pre.proini(nodes, mats, elements, loads)
# Counts equations and creates BCs array IBC
neq, IBC = ass.eqcounter(nn, nodes)
# Computes assembly operator
DME, IELCON = ass.DME(IBC, ne, elements)
# Assembles Global Stiffness Matrix KG
KG = ass.matassem(IBC, mats, elements, nn, ne, neq, COORD, DME, IELCON)
# Assembles Global Right Hand Side Vector RHSG
RHSG = ass.loadasem(loads, IBC, neq, nl)
#%%
# SYSTEM SOLUTION
#
# Solves the system
UG = np.linalg.solve(KG, RHSG)
print(np.allclose(np.dot(KG, UG), RHSG))
end_time = datetime.now()
print('Duration for system solution: {}'.format(end_time - start_time))
#%%
# POST-PROCESSING
#
start_time = datetime.now()
pos.plot_disp2(IBC, UG, nodes, elements)
# Scatter displacements over the elements
UU = pos.scatter(DME, UG, ne, neq, elements)
# Generates points inside the elements and computes strain solution
E_nodes = pos.strain_nodes(IELCON, UU, ne, COORD, elements)
pos.plot_strain2(E_nodes, nodes, elements)
end_time = datetime.now()
print('Duration for post processing: {}'.format(end_time - start_time))
print('Program terminated successfully!')
plt.show()
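A note for headless or scripted runs: the three easygui dialogs above can be swapped for hard-coded values. A minimal sketch; the folder and job name below are placeholders, not values from the commit:
folder = "./MODELS/square/"   # placeholder path to the folder with the input text files
name = "demo_job"             # placeholder job name
echo = "No"                   # skip echoing the input files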
|
|
4a75aa1bbfa7663460987143164e44970622e009
|
social_auth/backends/gae.py
|
social_auth/backends/gae.py
|
"""
Google App Engine support using User API
"""
from __future__ import absolute_import
from django.contrib.auth import authenticate
from social_auth.backends import SocialAuthBackend, BaseAuth, USERNAME
from google.appengine.api import users
class GAEBackend(SocialAuthBackend):
    """Google App Engine authentication backend"""
    name = 'GAE'
    def get_user_id(self, details, response):
        """Use the App Engine user id as the unique ID"""
        user = users.get_current_user()
        if user:
            return user.user_id()
    def get_user_details(self, response):
        """Return user details from the App Engine Users API."""
        user = users.get_current_user()
        return {USERNAME: user.user_id(),
                'email': user.email(),
'fullname': '',
'first_name': '',
'last_name': ''}
# Auth classes
class GAEAuth(BaseAuth):
    """Google App Engine authentication"""
    AUTH_BACKEND = GAEBackend
    def auth_url(self):
        return users.create_login_url()
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
if not users.get_current_user():
raise ValueError('Authentication error')
return authenticate(*args, **kwargs)
# Backend definition
BACKENDS = {
'gae': GAEAuth,
}
|
Add a social_auth wrapper backend around the Google App Engine User API.
|
Add a social_auth wrapper backend around the Google App Engine User API.
|
Python
|
bsd-3-clause
|
VishvajitP/django-social-auth,WW-Digital/django-social-auth,antoviaque/django-social-auth-norel,lovehhf/django-social-auth,czpython/django-social-auth,limdauto/django-social-auth,vxvinh1511/django-social-auth,duoduo369/django-social-auth,vuchau/django-social-auth,dongguangming/django-social-auth,getsentry/django-social-auth,MjAbuz/django-social-auth,gustavoam/django-social-auth,krvss/django-social-auth,beswarm/django-social-auth,lovehhf/django-social-auth,1st/django-social-auth,limdauto/django-social-auth,mayankcu/Django-social,beswarm/django-social-auth,omab/django-social-auth,michael-borisov/django-social-auth,qas612820704/django-social-auth,vxvinh1511/django-social-auth,gustavoam/django-social-auth,caktus/django-social-auth,VishvajitP/django-social-auth,dongguangming/django-social-auth,MjAbuz/django-social-auth,michael-borisov/django-social-auth,omab/django-social-auth,caktus/django-social-auth,qas612820704/django-social-auth,adw0rd/django-social-auth,sk7/django-social-auth,vuchau/django-social-auth
|
Add a social_auth wrapper backend around the Google App Engine User API.
|
"""
Google App Engine support using User API
"""
from __future__ import absolute_import
from django.contrib.auth import authenticate
from social_auth.backends import SocialAuthBackend, BaseAuth, USERNAME
from google.appengine.api import users
class GAEBackend(SocialAuthBackend):
    """Google App Engine authentication backend"""
    name = 'GAE'
    def get_user_id(self, details, response):
        """Use the App Engine user id as the unique ID"""
        user = users.get_current_user()
        if user:
            return user.user_id()
    def get_user_details(self, response):
        """Return user details from the App Engine Users API."""
        user = users.get_current_user()
        return {USERNAME: user.user_id(),
                'email': user.email(),
'fullname': '',
'first_name': '',
'last_name': ''}
# Auth classes
class GAEAuth(BaseAuth):
    """Google App Engine authentication"""
    AUTH_BACKEND = GAEBackend
    def auth_url(self):
        return users.create_login_url()
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
if not users.get_current_user():
raise ValueError('Authentication error')
return authenticate(*args, **kwargs)
# Backend definition
BACKENDS = {
'gae': GAEAuth,
}
|
<commit_before><commit_msg>Add a social_auth wrapper backend around the Google App Engine User API.<commit_after>
|
"""
Google App Engine support using User API
"""
from __future__ import absolute_import
from django.contrib.auth import authenticate
from social_auth.backends import SocialAuthBackend, BaseAuth, USERNAME
from google.appengine.api import users
class GAEBackend(SocialAuthBackend):
    """Google App Engine authentication backend"""
    name = 'GAE'
    def get_user_id(self, details, response):
        """Use the App Engine user id as the unique ID"""
        user = users.get_current_user()
        if user:
            return user.user_id()
    def get_user_details(self, response):
        """Return user details from the App Engine Users API."""
        user = users.get_current_user()
        return {USERNAME: user.user_id(),
                'email': user.email(),
'fullname': '',
'first_name': '',
'last_name': ''}
# Auth classes
class GAEAuth(BaseAuth):
    """Google App Engine authentication"""
    AUTH_BACKEND = GAEBackend
    def auth_url(self):
        return users.create_login_url()
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
if not users.get_current_user():
raise ValueError('Authentication error')
return authenticate(*args, **kwargs)
# Backend definition
BACKENDS = {
'gae': GAEAuth,
}
|
Add a social_auth wrapper backend around the Google App Engine User API."""
Google App Engine support using User API
"""
from __future__ import absolute_import
from django.contrib.auth import authenticate
from social_auth.backends import SocialAuthBackend, BaseAuth, USERNAME
from google.appengine.api import users
class GAEBackend(SocialAuthBackend):
    """Google App Engine authentication backend"""
    name = 'GAE'
    def get_user_id(self, details, response):
        """Use the App Engine user id as the unique ID"""
        user = users.get_current_user()
        if user:
            return user.user_id()
    def get_user_details(self, response):
        """Return user details from the App Engine Users API."""
        user = users.get_current_user()
        return {USERNAME: user.user_id(),
                'email': user.email(),
'fullname': '',
'first_name': '',
'last_name': ''}
# Auth classes
class GAEAuth(BaseAuth):
    """Google App Engine authentication"""
    AUTH_BACKEND = GAEBackend
    def auth_url(self):
        return users.create_login_url()
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
if not users.get_current_user():
raise ValueError('Authentication error')
return authenticate(*args, **kwargs)
# Backend definition
BACKENDS = {
'gae': GAEAuth,
}
|
<commit_before><commit_msg>Add a social_auth wrapper backend around the Google App Engine User API.<commit_after>"""
Google App Engine support using User API
"""
from __future__ import absolute_import
from django.contrib.auth import authenticate
from social_auth.backends import SocialAuthBackend, BaseAuth, USERNAME
from google.appengine.api import users
class GAEBackend(SocialAuthBackend):
    """Google App Engine authentication backend"""
    name = 'GAE'
    def get_user_id(self, details, response):
        """Use the App Engine user id as the unique ID"""
        user = users.get_current_user()
        if user:
            return user.user_id()
    def get_user_details(self, response):
        """Return user details from the App Engine Users API."""
        user = users.get_current_user()
        return {USERNAME: user.user_id(),
                'email': user.email(),
'fullname': '',
'first_name': '',
'last_name': ''}
# Auth classes
class GAEAuth(BaseAuth):
    """Google App Engine authentication"""
    AUTH_BACKEND = GAEBackend
    def auth_url(self):
        return users.create_login_url()
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
if not users.get_current_user():
raise ValueError('Authentication error')
return authenticate(*args, **kwargs)
# Backend definition
BACKENDS = {
'gae': GAEAuth,
}
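To actually use the backend, a Django project would normally register it in settings. A minimal sketch, assuming the file lands at social_auth/backends/gae.py as above; the ModelBackend entry is the usual Django default, not something added by this commit:
# settings.py (illustrative)
AUTHENTICATION_BACKENDS = (
    'social_auth.backends.gae.GAEBackend',
    'django.contrib.auth.backends.ModelBackend',
)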
|
|
8dcd1112e1c6408b7fc5e6b18d8e2b6352c4819b
|
server/defective_servers.py
|
server/defective_servers.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Defective XML Server in which every message character has LOSS_PROB chance
to be lost.
"""
import random
from xml_server import XMLServer
LOSS_PROB = 1.0/100
def random_loss(message):
return ''.join(map(lambda x: x if random.random() > LOSS_PROB else '',
message))
class RandomLossXMLServer(XMLServer):
def login(self, user, passwd):
return random_loss(XMLServer.login(self, user, passwd))
def get(self, parameter):
return random_loss(XMLServer.get(self, parameter))
def setPwState(self, state):
return random_loss(XMLServer.setPwState(self, state))
|
Add defective server implementation that randomly loses message characters.
|
Add defective server implementation that randomly loses message characters.
|
Python
|
apache-2.0
|
Solucionamos/dummybmc
|
Add defective server implementation that randomly loses message characters.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Defective XML Server in which every message character has LOSS_PROB chance
to be lost.
"""
import random
from xml_server import XMLServer
LOSS_PROB = 1.0/100
def random_loss(message):
return ''.join(map(lambda x: x if random.random() > LOSS_PROB else '',
message))
class RandomLossXMLServer(XMLServer):
def login(self, user, passwd):
return random_loss(XMLServer.login(self, user, passwd))
def get(self, parameter):
return random_loss(XMLServer.get(self, parameter))
def setPwState(self, state):
return random_loss(XMLServer.setPwState(self, state))
|
<commit_before><commit_msg>Add defective server implementation that randomly loses message characters.<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Defective XML Server in which every message character has LOSS_PROB chance
to be lost.
"""
import random
from xml_server import XMLServer
LOSS_PROB = 1.0/100
def random_loss(message):
return ''.join(map(lambda x: x if random.random() > LOSS_PROB else '',
message))
class RandomLossXMLServer(XMLServer):
def login(self, user, passwd):
return random_loss(XMLServer.login(self, user, passwd))
def get(self, parameter):
return random_loss(XMLServer.get(self, parameter))
def setPwState(self, state):
return random_loss(XMLServer.setPwState(self, state))
|
Add defective server implementation that randomly loses message characters.#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Defective XML Server in which every message character has LOSS_PROB chance
to be lost.
"""
import random
from xml_server import XMLServer
LOSS_PROB = 1.0/100
def random_loss(message):
return ''.join(map(lambda x: x if random.random() > LOSS_PROB else '',
message))
class RandomLossXMLServer(XMLServer):
def login(self, user, passwd):
return random_loss(XMLServer.login(self, user, passwd))
def get(self, parameter):
return random_loss(XMLServer.get(self, parameter))
def setPwState(self, state):
return random_loss(XMLServer.setPwState(self, state))
|
<commit_before><commit_msg>Add defective server implementation that randomly loses message characters.<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Defective XML Server in which every message character has LOSS_PROB chance
to be lost.
"""
import random
from xml_server import XMLServer
LOSS_PROB = 1.0/100
def random_loss(message):
return ''.join(map(lambda x: x if random.random() > LOSS_PROB else '',
message))
class RandomLossXMLServer(XMLServer):
def login(self, user, passwd):
return random_loss(XMLServer.login(self, user, passwd))
def get(self, parameter):
return random_loss(XMLServer.get(self, parameter))
def setPwState(self, state):
return random_loss(XMLServer.setPwState(self, state))
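A quick way to see the failure mode this server injects is to run the same kind of lossy filter on a long string; on average about 1% of the characters disappear. Self-contained sketch (standalone, does not import the server module):
import random
LOSS_PROB = 1.0 / 100
def random_loss(message):
    # drop each character independently with probability LOSS_PROB
    return ''.join(c if random.random() > LOSS_PROB else '' for c in message)
random.seed(0)  # fixed seed so the example is repeatable
original = "0123456789" * 1000
lossy = random_loss(original)
print(len(original) - len(lossy))  # number of characters dropped, roughly 1% of 10000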
|
|
e36bdb49fd590f7bb0acf27593552c275c8979a8
|
numba/cuda/tests/cudapy/test_freevar.py
|
numba/cuda/tests/cudapy/test_freevar.py
|
from __future__ import print_function, absolute_import
import numpy
from numba import cuda
from numba.cuda.testing import unittest
class TestFreeVar(unittest.TestCase):
def test_freevar(self):
"""Make sure we can compile the following kernel with freevar reference
in macros
"""
from numba import float32
size = 1024
nbtype = float32
@cuda.jit("(float32[::1], intp)")
def foo(A, i):
"Dummy function"
sdata = cuda.shared.array(size, # size is freevar
dtype=nbtype) # nbtype is freevar
A[i] = sdata[i]
A = numpy.arange(2, dtype="float32")
foo(A, 0)
if __name__ == '__main__':
unittest.main()
|
Add test for freevar in cuda macros
|
Add test for freevar in cuda macros
|
Python
|
bsd-2-clause
|
cpcloud/numba,jriehl/numba,pitrou/numba,seibert/numba,stuartarchibald/numba,pombredanne/numba,pitrou/numba,GaZ3ll3/numba,gdementen/numba,pombredanne/numba,IntelLabs/numba,gmarkall/numba,gdementen/numba,numba/numba,pombredanne/numba,jriehl/numba,numba/numba,pitrou/numba,sklam/numba,sklam/numba,GaZ3ll3/numba,stefanseefeld/numba,seibert/numba,gmarkall/numba,ssarangi/numba,stuartarchibald/numba,IntelLabs/numba,stefanseefeld/numba,stonebig/numba,ssarangi/numba,seibert/numba,jriehl/numba,jriehl/numba,cpcloud/numba,cpcloud/numba,GaZ3ll3/numba,numba/numba,IntelLabs/numba,cpcloud/numba,cpcloud/numba,gmarkall/numba,sklam/numba,pombredanne/numba,stefanseefeld/numba,IntelLabs/numba,IntelLabs/numba,stefanseefeld/numba,sklam/numba,gmarkall/numba,ssarangi/numba,numba/numba,ssarangi/numba,stonebig/numba,gmarkall/numba,gdementen/numba,pombredanne/numba,stonebig/numba,pitrou/numba,numba/numba,jriehl/numba,GaZ3ll3/numba,seibert/numba,gdementen/numba,stonebig/numba,gdementen/numba,GaZ3ll3/numba,pitrou/numba,sklam/numba,stefanseefeld/numba,ssarangi/numba,stuartarchibald/numba,stuartarchibald/numba,stuartarchibald/numba,seibert/numba,stonebig/numba
|
Add test for freevar in cuda macros
|
from __future__ import print_function, absolute_import
import numpy
from numba import cuda
from numba.cuda.testing import unittest
class TestFreeVar(unittest.TestCase):
def test_freevar(self):
"""Make sure we can compile the following kernel with freevar reference
in macros
"""
from numba import float32
size = 1024
nbtype = float32
@cuda.jit("(float32[::1], intp)")
def foo(A, i):
"Dummy function"
sdata = cuda.shared.array(size, # size is freevar
dtype=nbtype) # nbtype is freevar
A[i] = sdata[i]
A = numpy.arange(2, dtype="float32")
foo(A, 0)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for freevar in cuda macros<commit_after>
|
from __future__ import print_function, absolute_import
import numpy
from numba import cuda
from numba.cuda.testing import unittest
class TestFreeVar(unittest.TestCase):
def test_freevar(self):
"""Make sure we can compile the following kernel with freevar reference
in macros
"""
from numba import float32
size = 1024
nbtype = float32
@cuda.jit("(float32[::1], intp)")
def foo(A, i):
"Dummy function"
sdata = cuda.shared.array(size, # size is freevar
dtype=nbtype) # nbtype is freevar
A[i] = sdata[i]
A = numpy.arange(2, dtype="float32")
foo(A, 0)
if __name__ == '__main__':
unittest.main()
|
Add test for freevar in cuda macrosfrom __future__ import print_function, absolute_import
import numpy
from numba import cuda
from numba.cuda.testing import unittest
class TestFreeVar(unittest.TestCase):
def test_freevar(self):
"""Make sure we can compile the following kernel with freevar reference
in macros
"""
from numba import float32
size = 1024
nbtype = float32
@cuda.jit("(float32[::1], intp)")
def foo(A, i):
"Dummy function"
sdata = cuda.shared.array(size, # size is freevar
dtype=nbtype) # nbtype is freevar
A[i] = sdata[i]
A = numpy.arange(2, dtype="float32")
foo(A, 0)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for freevar in cuda macros<commit_after>from __future__ import print_function, absolute_import
import numpy
from numba import cuda
from numba.cuda.testing import unittest
class TestFreeVar(unittest.TestCase):
def test_freevar(self):
"""Make sure we can compile the following kernel with freevar reference
in macros
"""
from numba import float32
size = 1024
nbtype = float32
@cuda.jit("(float32[::1], intp)")
def foo(A, i):
"Dummy function"
sdata = cuda.shared.array(size, # size is freevar
dtype=nbtype) # nbtype is freevar
A[i] = sdata[i]
A = numpy.arange(2, dtype="float32")
foo(A, 0)
if __name__ == '__main__':
unittest.main()
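For contrast, a version of the same kernel that passes literals to the shared-memory macro instead of free variables; illustrative only, and like the test it needs a CUDA toolkit to compile:
from numba import cuda, float32
@cuda.jit("(float32[::1], intp)")
def bar(A, i):
    sdata = cuda.shared.array(1024, dtype=float32)  # literals instead of the freevars size/nbtype
    A[i] = sdata[i]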
|
|
aa7fdb1d7a7b42dee7bcfcf1eb4a4a5efcb3a2e3
|
go/api/go_api/tests/utils.py
|
go/api/go_api/tests/utils.py
|
import json
from django.conf import settings
from mock import Mock, patch
class MockRpc(object):
def setUp(self):
        self.set_response()
self.request = None
self.rpc_patcher = patch('go.api.go_api.client.rpc')
self.mock_rpc = self.rpc_patcher.start()
self.mock_rpc.side_effect = self.set_request
def set_request(self, session_id, method, params, id=None):
self.request = {
'session_id': session_id,
'method': method,
'params': params,
'id': id,
}
return self.response
def tearDown(self):
self.rpc_patcher.stop()
@staticmethod
def make_response_data(result={}, id=None):
return {
'id': id,
'jsonrpc': '2.0',
'result': result,
}
def set_response(self, result={}, id=None):
response = Mock()
response.status_code = 200
response.json = self.make_response_data(result, id)
self.response = response
def set_rpc_error_response(self, error, id=None):
response = Mock()
response.status_code = 200
data = self.make_response_data(result=None, id=id)
        data['error'] = error
response.json = data
self.response = response
def set_error_response(self, code, text):
response = Mock()
response.status_code = code
response.text = text
self.response = response
def check_request(self, session_id, method, params, id=None):
self.mock_post.assert_called_once_with(
settings.GO_API_URL,
auth=('session_id', session_id),
data=json.dumps({
'jsonrpc': '2.0',
'id': id,
'params': params,
'method': method,
}))
|
Add MockRpc test utility class
|
Add MockRpc test utility class
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
Add MockRpc test utility class
|
import json
from django.conf import settings
from mock import Mock, patch
class MockRpc(object):
def setUp(self):
        self.set_response()
self.request = None
self.rpc_patcher = patch('go.api.go_api.client.rpc')
self.mock_rpc = self.rpc_patcher.start()
self.mock_rpc.side_effect = self.set_request
def set_request(self, session_id, method, params, id=None):
self.request = {
'session_id': session_id,
'method': method,
'params': params,
'id': id,
}
return self.response
def tearDown(self):
self.rpc_patcher.stop()
@staticmethod
def make_response_data(result={}, id=None):
return {
'id': id,
'jsonrpc': '2.0',
'result': result,
}
def set_response(self, result={}, id=None):
response = Mock()
response.status_code = 200
response.json = self.make_response_data(result, id)
self.response = response
def set_rpc_error_response(self, error, id=None):
response = Mock()
response.status_code = 200
data = self.make_response_data(result=None, id=id)
        data['error'] = error
response.json = data
self.response = response
def set_error_response(self, code, text):
response = Mock()
response.status_code = code
response.text = text
self.response = response
def check_request(self, session_id, method, params, id=None):
self.mock_post.assert_called_once_with(
settings.GO_API_URL,
auth=('session_id', session_id),
data=json.dumps({
'jsonrpc': '2.0',
'id': id,
'params': params,
'method': method,
}))
|
<commit_before><commit_msg>Add MockRpc test utility class<commit_after>
|
import json
from django.conf import settings
from mock import Mock, patch
class MockRpc(object):
def setUp(self):
        self.set_response()
self.request = None
self.rpc_patcher = patch('go.api.go_api.client.rpc')
self.mock_rpc = self.rpc_patcher.start()
self.mock_rpc.side_effect = self.set_request
def set_request(self, session_id, method, params, id=None):
self.request = {
'session_id': session_id,
'method': method,
'params': params,
'id': id,
}
return self.response
def tearDown(self):
self.rpc_patcher.stop()
@staticmethod
def make_response_data(result={}, id=None):
return {
'id': id,
'jsonrpc': '2.0',
'result': result,
}
def set_response(self, result={}, id=None):
response = Mock()
response.status_code = 200
response.json = self.make_response_data(result, id)
self.response = response
def set_rpc_error_response(self, error, id=None):
response = Mock()
response.status_code = 200
data = self.make_response_data(result=None, id=id)
        data['error'] = error
response.json = data
self.response = response
def set_error_response(self, code, text):
response = Mock()
response.status_code = code
response.text = text
self.response = response
def check_request(self, session_id, method, params, id=None):
self.mock_post.assert_called_once_with(
settings.GO_API_URL,
auth=('session_id', session_id),
data=json.dumps({
'jsonrpc': '2.0',
'id': id,
'params': params,
'method': method,
}))
|
Add MockRpc test utility classimport json
from django.conf import settings
from mock import Mock, patch
class MockRpc(object):
def setUp(self):
        self.set_response()
self.request = None
self.rpc_patcher = patch('go.api.go_api.client.rpc')
self.mock_rpc = self.rpc_patcher.start()
self.mock_rpc.side_effect = self.set_request
def set_request(self, session_id, method, params, id=None):
self.request = {
'session_id': session_id,
'method': method,
'params': params,
'id': id,
}
return self.response
def tearDown(self):
self.rpc_patcher.stop()
@staticmethod
def make_response_data(result={}, id=None):
return {
'id': id,
'jsonrpc': '2.0',
'result': result,
}
def set_response(self, result={}, id=None):
response = Mock()
response.status_code = 200
response.json = self.make_response_data(result, id)
self.response = response
def set_rpc_error_response(self, error, id=None):
response = Mock()
response.status_code = 200
data = self.make_response_data(result=None, id=id)
        data['error'] = error
response.json = data
self.response = response
def set_error_response(self, code, text):
response = Mock()
response.status_code = code
response.text = text
self.response = response
def check_request(self, session_id, method, params, id=None):
self.mock_post.assert_called_once_with(
settings.GO_API_URL,
auth=('session_id', session_id),
data=json.dumps({
'jsonrpc': '2.0',
'id': id,
'params': params,
'method': method,
}))
|
<commit_before><commit_msg>Add MockRpc test utility class<commit_after>import json
from django.conf import settings
from mock import Mock, patch
class MockRpc(object):
def setUp(self):
        self.set_response()
self.request = None
self.rpc_patcher = patch('go.api.go_api.client.rpc')
self.mock_rpc = self.rpc_patcher.start()
self.mock_rpc.side_effect = self.set_request
def set_request(self, session_id, method, params, id=None):
self.request = {
'session_id': session_id,
'method': method,
'params': params,
'id': id,
}
return self.response
def tearDown(self):
self.rpc_patcher.stop()
@staticmethod
def make_response_data(result={}, id=None):
return {
'id': id,
'jsonrpc': '2.0',
'result': result,
}
def set_response(self, result={}, id=None):
response = Mock()
response.status_code = 200
response.json = self.make_response_data(result, id)
self.response = response
def set_rpc_error_response(self, error, id=None):
response = Mock()
response.status_code = 200
data = self.make_response_data(result=None, id=id)
        data['error'] = error
response.json = data
self.response = response
def set_error_response(self, code, text):
response = Mock()
response.status_code = code
response.text = text
self.response = response
def check_request(self, session_id, method, params, id=None):
self.mock_post.assert_called_once_with(
settings.GO_API_URL,
auth=('session_id', session_id),
data=json.dumps({
'jsonrpc': '2.0',
'id': id,
'params': params,
'method': method,
}))
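A minimal sketch of how a test might drive the helper; the import path follows the file location above, and the method name and params are made up for illustration:
import unittest
from go.api.go_api.tests.utils import MockRpc
class ExampleGoApiTest(unittest.TestCase):
    def setUp(self):
        self.rpc = MockRpc()
        self.rpc.setUp()
    def tearDown(self):
        self.rpc.tearDown()
    def test_records_request_and_returns_canned_response(self):
        self.rpc.set_response(result={'ok': True})
        # anything going through go.api.go_api.client.rpc now hits the patched mock:
        response = self.rpc.mock_rpc('session-1', 'conversations', ['user-1'])
        self.assertEqual(self.rpc.request['method'], 'conversations')
        self.assertEqual(response.json['result'], {'ok': True})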
|
|
9b0bec099dbc8bac73a19407416770d7c194d32f
|
cptm/corpusstatistics.py
|
cptm/corpusstatistics.py
|
import logging
import argparse
import glob
from cptm.utils.experiment import load_config, get_corpus
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='directory containing the raw data.')
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
print 'Folia files'
y = '{}/*/'.format(args.data_dir)
years = glob.glob(y)
print '# of years,{}'.format(len(years))
for year in years:
data_files = glob.glob('{}data_folia/*.xml.gz'.format(year))
print '{},{}'.format(year, len(data_files))
config = load_config(args.json)
input_dir = config.get('inputData')
print '\ntext files'
perspectives = glob.glob(input_dir)
print '# of perspectives,{}'.format(len(perspectives))
total = 0
for persp in perspectives:
data_files = glob.glob('{}/*.txt'.format(persp))
total += len(data_files)
print '{},{}'.format(persp, len(data_files))
print 'total,{}'.format(total)
|
Add script to calculate basic corpus statistics
|
Add script to calculate basic corpus statistics
- # of folia files (total and per parliamentary year)
- text files (total and per perspective)
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to calculate basic corpus statistics
- # of folia files (total and per parliamentary year)
- text files (total and per perspective)
|
import logging
import argparse
import glob
from cptm.utils.experiment import load_config, get_corpus
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='directory containing the raw data.')
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
print 'Folia files'
y = '{}/*/'.format(args.data_dir)
years = glob.glob(y)
print '# of years,{}'.format(len(years))
for year in years:
data_files = glob.glob('{}data_folia/*.xml.gz'.format(year))
print '{},{}'.format(year, len(data_files))
config = load_config(args.json)
input_dir = config.get('inputData')
print '\ntext files'
perspectives = glob.glob(input_dir)
print '# of perspectives,{}'.format(len(perspectives))
total = 0
for persp in perspectives:
data_files = glob.glob('{}/*.txt'.format(persp))
total += len(data_files)
print '{},{}'.format(persp, len(data_files))
print 'total,{}'.format(total)
|
<commit_before><commit_msg>Add script to calculate basic corpus statistics
- # of folia files (total and per parliamentary year)
- text files (total and per perspective)<commit_after>
|
import logging
import argparse
import glob
from cptm.utils.experiment import load_config, get_corpus
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='directory containing the raw data.')
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
print 'Folia files'
y = '{}/*/'.format(args.data_dir)
years = glob.glob(y)
print '# of years,{}'.format(len(years))
for year in years:
data_files = glob.glob('{}data_folia/*.xml.gz'.format(year))
print '{},{}'.format(year, len(data_files))
config = load_config(args.json)
input_dir = config.get('inputData')
print '\ntext files'
perspectives = glob.glob(input_dir)
print '# of perspectives,{}'.format(len(perspectives))
total = 0
for persp in perspectives:
data_files = glob.glob('{}/*.txt'.format(persp))
total += len(data_files)
print '{},{}'.format(persp, len(data_files))
print 'total,{}'.format(total)
|
Add script to calculate basic corpus statistics
- # of folia files (total and per parliamentary year)
- text files (total and per perspective)import logging
import argparse
import glob
from cptm.utils.experiment import load_config, get_corpus
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='directory containing the raw data.')
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
print 'Folia files'
y = '{}/*/'.format(args.data_dir)
years = glob.glob(y)
print '# of years,{}'.format(len(years))
for year in years:
data_files = glob.glob('{}data_folia/*.xml.gz'.format(year))
print '{},{}'.format(year, len(data_files))
config = load_config(args.json)
input_dir = config.get('inputData')
print '\ntext files'
perspectives = glob.glob(input_dir)
print '# of perspectives,{}'.format(len(perspectives))
total = 0
for persp in perspectives:
data_files = glob.glob('{}/*.txt'.format(persp))
total += len(data_files)
print '{},{}'.format(persp, len(data_files))
print 'total,{}'.format(total)
|
<commit_before><commit_msg>Add script to calculate basic corpus statistics
- # of folia files (total and per parliamentary year)
- text files (total and per perspective)<commit_after>import logging
import argparse
import glob
from cptm.utils.experiment import load_config, get_corpus
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', help='directory containing the raw data.')
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
print 'Folia files'
y = '{}/*/'.format(args.data_dir)
years = glob.glob(y)
print '# of years,{}'.format(len(years))
for year in years:
data_files = glob.glob('{}data_folia/*.xml.gz'.format(year))
print '{},{}'.format(year, len(data_files))
config = load_config(args.json)
input_dir = config.get('inputData')
print '\ntext files'
perspectives = glob.glob(input_dir)
print '# of perspectives,{}'.format(len(perspectives))
total = 0
for persp in perspectives:
data_files = glob.glob('{}/*.txt'.format(persp))
total += len(data_files)
print '{},{}'.format(persp, len(data_files))
print 'total,{}'.format(total)
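For reference, a hypothetical invocation; both paths are placeholders, and since the script prints comma-separated lines the output can simply be redirected to a CSV file:
python cptm/corpusstatistics.py /data/folia experiments/config.json > corpus_statistics.csv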
|
|
afd8acb520d2e5dbebbc3ea243774f343d34fc68
|
tests/test_login.py
|
tests/test_login.py
|
import pytest
from selenium import webdriver
@pytest.fixture
def wd(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_example(wd):
wd.get("http://localhost/litecart/admin/")
wd.find_element_by_name("username").send_keys("admin")
wd.find_element_by_name("password").send_keys("admin")
wd.find_element_by_name("login").click()
assert not wd.current_url.endswith("/login_page.php")
|
Revert "Revert "Homework 3 (Login test developed)""
|
Revert "Revert "Homework 3 (Login test developed)""
This reverts commit 4175bef3bae18fed12eb5ed1cac4d405af2f9613.
|
Python
|
apache-2.0
|
rgurevych/webdriver_training
|
Revert "Revert "Homework 3 (Login test developed)""
This reverts commit 4175bef3bae18fed12eb5ed1cac4d405af2f9613.
|
import pytest
from selenium import webdriver
@pytest.fixture
def wd(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_example(wd):
wd.get("http://localhost/litecart/admin/")
wd.find_element_by_name("username").send_keys("admin")
wd.find_element_by_name("password").send_keys("admin")
wd.find_element_by_name("login").click()
assert not wd.current_url.endswith("/login_page.php")
|
<commit_before><commit_msg>Revert "Revert "Homework 3 (Login test developed)""
This reverts commit 4175bef3bae18fed12eb5ed1cac4d405af2f9613.<commit_after>
|
import pytest
from selenium import webdriver
@pytest.fixture
def wd(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_example(wd):
wd.get("http://localhost/litecart/admin/")
wd.find_element_by_name("username").send_keys("admin")
wd.find_element_by_name("password").send_keys("admin")
wd.find_element_by_name("login").click()
assert not wd.current_url.endswith("/login_page.php")
|
Revert "Revert "Homework 3 (Login test developed)""
This reverts commit 4175bef3bae18fed12eb5ed1cac4d405af2f9613.import pytest
from selenium import webdriver
@pytest.fixture
def wd(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_example(wd):
wd.get("http://localhost/litecart/admin/")
wd.find_element_by_name("username").send_keys("admin")
wd.find_element_by_name("password").send_keys("admin")
wd.find_element_by_name("login").click()
assert not wd.current_url.endswith("/login_page.php")
|
<commit_before><commit_msg>Revert "Revert "Homework 3 (Login test developed)""
This reverts commit 4175bef3bae18fed12eb5ed1cac4d405af2f9613.<commit_after>import pytest
from selenium import webdriver
@pytest.fixture
def wd(request):
wd = webdriver.Chrome()
request.addfinalizer(wd.quit)
return wd
def test_example(wd):
wd.get("http://localhost/litecart/admin/")
wd.find_element_by_name("username").send_keys("admin")
wd.find_element_by_name("password").send_keys("admin")
wd.find_element_by_name("login").click()
assert not wd.current_url.endswith("/login_page.php")
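The assertion above runs immediately after the click, which can be flaky on slow pages. A hedged variant that waits for the redirect, reusing the same fixture (sketch only):
from selenium.webdriver.support.ui import WebDriverWait
def test_example_with_wait(wd):
    wd.get("http://localhost/litecart/admin/")
    wd.find_element_by_name("username").send_keys("admin")
    wd.find_element_by_name("password").send_keys("admin")
    wd.find_element_by_name("login").click()
    # wait up to 10 s for the URL to leave the login page before judging the result
    WebDriverWait(wd, 10).until(
        lambda driver: not driver.current_url.endswith("/login_page.php"))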
|
|
3d4d245632e87df97c5815710fa13281e700a12b
|
press_releases/migrations/0006_auto_20161115_1118.py
|
press_releases/migrations/0006_auto_20161115_1118.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0009_auto_20161026_2044'),
('icekit_press_releases', '0005_auto_20161110_1531'),
]
operations = [
migrations.AddField(
model_name='pressreleaselisting',
name='boosted_search_terms',
field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', blank=True, null=True, help_text=b'The hero image for this content.', to='icekit_plugins_image.Image'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='list_image',
field=models.ImageField(blank=True, help_text=b"image to use in listings. Default image is used if this isn't given", upload_to=b'icekit/listable/list_image/'),
),
]
|
Add icekit hero mixins to listing page.
|
Add icekit hero mixins to listing page.
|
Python
|
mit
|
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/icekit-press-releases,ic-labs/icekit-press-releases,ic-labs/django-icekit
|
Add icekit hero mixins to listing page.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0009_auto_20161026_2044'),
('icekit_press_releases', '0005_auto_20161110_1531'),
]
operations = [
migrations.AddField(
model_name='pressreleaselisting',
name='boosted_search_terms',
field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', blank=True, null=True, help_text=b'The hero image for this content.', to='icekit_plugins_image.Image'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='list_image',
field=models.ImageField(blank=True, help_text=b"image to use in listings. Default image is used if this isn't given", upload_to=b'icekit/listable/list_image/'),
),
]
|
<commit_before><commit_msg>Add icekit hero mixins to listing page.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0009_auto_20161026_2044'),
('icekit_press_releases', '0005_auto_20161110_1531'),
]
operations = [
migrations.AddField(
model_name='pressreleaselisting',
name='boosted_search_terms',
field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', blank=True, null=True, help_text=b'The hero image for this content.', to='icekit_plugins_image.Image'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='list_image',
field=models.ImageField(blank=True, help_text=b"image to use in listings. Default image is used if this isn't given", upload_to=b'icekit/listable/list_image/'),
),
]
|
Add icekit hero mixins to listing page.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0009_auto_20161026_2044'),
('icekit_press_releases', '0005_auto_20161110_1531'),
]
operations = [
migrations.AddField(
model_name='pressreleaselisting',
name='boosted_search_terms',
field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', blank=True, null=True, help_text=b'The hero image for this content.', to='icekit_plugins_image.Image'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='list_image',
field=models.ImageField(blank=True, help_text=b"image to use in listings. Default image is used if this isn't given", upload_to=b'icekit/listable/list_image/'),
),
]
|
<commit_before><commit_msg>Add icekit hero mixins to listing page.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_image', '0009_auto_20161026_2044'),
('icekit_press_releases', '0005_auto_20161110_1531'),
]
operations = [
migrations.AddField(
model_name='pressreleaselisting',
name='boosted_search_terms',
field=models.TextField(blank=True, help_text='Words (space-separated) added here are boosted in relevance for search results increasing the chance of this appearing higher in the search results.'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='hero_image',
field=models.ForeignKey(related_name='+', blank=True, null=True, help_text=b'The hero image for this content.', to='icekit_plugins_image.Image'),
),
migrations.AddField(
model_name='pressreleaselisting',
name='list_image',
field=models.ImageField(blank=True, help_text=b"image to use in listings. Default image is used if this isn't given", upload_to=b'icekit/listable/list_image/'),
),
]
|
|
69d50d15056cc996288f572f9177173087732f07
|
examples/upload_software.py
|
examples/upload_software.py
|
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# * This script is a simple example showing how to upload a Software to a
# * Cytomine instance, using the new (v3.0.0) Software architecture.
# * It needs a Cytomine host, the keys of a user, a software name and the file to upload.
import cytomine
import logging
import sys
from cytomine import Cytomine
from cytomine.models import Software
from argparse import ArgumentParser
__author__ = "Grégoire Vincke <gregoire.vincke@cytomine.coop>"
if __name__ == '__main__':
parser = ArgumentParser(prog="Cytomine Python client example")
# Cytomine
parser.add_argument('--cytomine_host', dest='host',
default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
help="The Cytomine private key")
parser.add_argument('--software_name', dest='software_name',
help="The name of your Software")
parser.add_argument('--filepath', dest='filepath',
help="The filepath (on your file system) of the file you want to upload")
params, other = parser.parse_known_args(sys.argv[1:])
with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key, verbose=logging.INFO) as cytomine:
software = Software(name=params.software_name).upload(params.filepath)
|
Add an example of script to upload a Software, using V3.0.0 new Software architecture
|
Add an example of script to upload a Software, using V3.0.0 new Software architecture
|
Python
|
apache-2.0
|
cytomine/Cytomine-python-client,cytomine/Cytomine-python-client
|
Add an example of script to upload a Software, using V3.0.0 new Software architecture
|
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# * This script is a simple example showing how to upload a Software to a
# * Cytomine instance, using the new (v3.0.0) Software architecture.
# * It needs a Cytomine host, the keys of a user, a software name and the file to upload.
import cytomine
import logging
import sys
from cytomine import Cytomine
from cytomine.models import Software
from argparse import ArgumentParser
__author__ = "Grégoire Vincke <gregoire.vincke@cytomine.coop>"
if __name__ == '__main__':
parser = ArgumentParser(prog="Cytomine Python client example")
# Cytomine
parser.add_argument('--cytomine_host', dest='host',
default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
help="The Cytomine private key")
parser.add_argument('--software_name', dest='software_name',
help="The name of your Software")
parser.add_argument('--filepath', dest='filepath',
help="The filepath (on your file system) of the file you want to upload")
params, other = parser.parse_known_args(sys.argv[1:])
with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key, verbose=logging.INFO) as cytomine:
software = Software(name=params.software_name).upload(params.filepath)
|
<commit_before><commit_msg>Add an example of script to upload a Software, using V3.0.0 new Software architecture<commit_after>
|
# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# * This script is a simple example showing how to upload a Software to a
# * Cytomine instance, using the new (v3.0.0) Software architecture.
# * It needs a Cytomine host, the keys of a user, a software name and the file to upload.
import cytomine
import logging
import sys
from cytomine import Cytomine
from cytomine.models import Software
from argparse import ArgumentParser
__author__ = "Grégoire Vincke <gregoire.vincke@cytomine.coop>"
if __name__ == '__main__':
parser = ArgumentParser(prog="Cytomine Python client example")
# Cytomine
parser.add_argument('--cytomine_host', dest='host',
default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
help="The Cytomine private key")
parser.add_argument('--software_name', dest='software_name',
help="The name of your Software")
parser.add_argument('--filepath', dest='filepath',
help="The filepath (on your file system) of the file you want to upload")
params, other = parser.parse_known_args(sys.argv[1:])
with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key, verbose=logging.INFO) as cytomine:
software = Software(name=params.software_name).upload(params.filepath)
|
Add an example of script to upload a Software, using V3.0.0 new Software architecture# -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# * This script is a simple example showing how to upload a Software
# * with the Cytomine Python Client.
# * Using a correct Cytomine instance URL and the keys of a user, it uploads the given file as a new Software.
import cytomine
import logging
import sys
from cytomine import Cytomine
from cytomine.models import Software
from argparse import ArgumentParser
__author__ = "Grégoire Vincke <gregoire.vincke@cytomine.coop>"
if __name__ == '__main__':
parser = ArgumentParser(prog="Cytomine Python client example")
# Cytomine
parser.add_argument('--cytomine_host', dest='host',
default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
help="The Cytomine private key")
parser.add_argument('--software_name', dest='software_name',
help="The name of your Software")
parser.add_argument('--filepath', dest='filepath',
help="The filepath (on your file system) of the file you want to upload")
params, other = parser.parse_known_args(sys.argv[1:])
with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key, verbose=logging.INFO) as cytomine:
software = Software(name=params.software_name).upload(params.filepath)
|
<commit_before><commit_msg>Add an example of script to upload a Software, using V3.0.0 new Software architecture<commit_after># -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# * This script is a simple example showing how to upload a Software
# * with the Cytomine Python Client.
# * Using a correct Cytomine instance URL and the keys of a user, it uploads the given file as a new Software.
import cytomine
import logging
import sys
from cytomine import Cytomine
from cytomine.models import Software
from argparse import ArgumentParser
__author__ = "Grégoire Vincke <gregoire.vincke@cytomine.coop>"
if __name__ == '__main__':
parser = ArgumentParser(prog="Cytomine Python client example")
# Cytomine
parser.add_argument('--cytomine_host', dest='host',
default='demo.cytomine.be', help="The Cytomine host")
parser.add_argument('--cytomine_public_key', dest='public_key',
help="The Cytomine public key")
parser.add_argument('--cytomine_private_key', dest='private_key',
help="The Cytomine private key")
parser.add_argument('--software_name', dest='software_name',
help="The name of your Software")
parser.add_argument('--filepath', dest='filepath',
help="The filepath (on your file system) of the file you want to upload")
params, other = parser.parse_known_args(sys.argv[1:])
with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key, verbose=logging.INFO) as cytomine:
software = Software(name=params.software_name).upload(params.filepath)
|
|
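A minimal usage sketch for the Software-upload record above; the host, keys, software name, file path and script name are placeholders, and only the client calls mirror the record:
import logging
from cytomine import Cytomine
from cytomine.models import Software
# Hypothetical values; substitute a real instance URL, user keys and descriptor file.
with Cytomine(host="demo.cytomine.be", public_key="<PUBLIC_KEY>",
              private_key="<PRIVATE_KEY>", verbose=logging.INFO):
    software = Software(name="MySoftware").upload("./descriptor.json")
# Equivalent shell invocation through the argparse flags defined in the script
# (the script file name below is a stand-in for whatever the commit adds):
#   python example_upload_software.py --cytomine_host demo.cytomine.be \
#       --cytomine_public_key <PUBLIC_KEY> --cytomine_private_key <PRIVATE_KEY> \
#       --software_name MySoftware --filepath ./descriptor.json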
1e8197fee5031ee7ba384eb537b13f381a837685
|
neutron/tests/unit/conf/policies/test_service_type.py
|
neutron/tests/unit/conf/policies/test_service_type.py
|
# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import policy
from neutron.tests.unit.conf.policies import base
class ServiceTypeAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(ServiceTypeAPITestCase, self).setUp()
self.target = {}
class SystemAdminTests(ServiceTypeAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_get_service_provider(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_provider', self.target))
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(SystemAdminTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
|
Add tests for service_type API's new policy rules
|
Add tests for service_type API's new policy rules
Related-blueprint: bp/secure-rbac-roles
Change-Id: I0cbe260fc786d00b014c804ae74d99c8f9685e88
|
Python
|
apache-2.0
|
openstack/neutron,openstack/neutron,openstack/neutron,mahak/neutron,mahak/neutron,mahak/neutron
|
Add tests for service_type API's new policy rules
Related-blueprint: bp/secure-rbac-roles
Change-Id: I0cbe260fc786d00b014c804ae74d99c8f9685e88
|
# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import policy
from neutron.tests.unit.conf.policies import base
class ServiceTypeAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(ServiceTypeAPITestCase, self).setUp()
self.target = {}
class SystemAdminTests(ServiceTypeAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_get_service_provider(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_provider', self.target))
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(SystemAdminTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
|
<commit_before><commit_msg>Add tests for service_type API's new policy rules
Related-blueprint: bp/secure-rbac-roles
Change-Id: I0cbe260fc786d00b014c804ae74d99c8f9685e88<commit_after>
|
# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import policy
from neutron.tests.unit.conf.policies import base
class ServiceTypeAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(ServiceTypeAPITestCase, self).setUp()
self.target = {}
class SystemAdminTests(ServiceTypeAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_get_service_provider(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_provider', self.target))
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(SystemAdminTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
|
Add tests for service_type API's new policy rules
Related-blueprint: bp/secure-rbac-roles
Change-Id: I0cbe260fc786d00b014c804ae74d99c8f9685e88# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import policy
from neutron.tests.unit.conf.policies import base
class ServiceTypeAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(ServiceTypeAPITestCase, self).setUp()
self.target = {}
class SystemAdminTests(ServiceTypeAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_get_service_provider(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_provider', self.target))
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(SystemAdminTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
|
<commit_before><commit_msg>Add tests for service_type API's new policy rules
Related-blueprint: bp/secure-rbac-roles
Change-Id: I0cbe260fc786d00b014c804ae74d99c8f9685e88<commit_after># Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import policy
from neutron.tests.unit.conf.policies import base
class ServiceTypeAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(ServiceTypeAPITestCase, self).setUp()
self.target = {}
class SystemAdminTests(ServiceTypeAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_get_service_provider(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_provider', self.target))
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(SystemAdminTests):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
|
|
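A stripped-down sketch of the persona-matrix pattern used in the record above: the check is written once and each subclass only swaps the request context. The class names and the permissive enforce stub below are illustrative, not taken from the neutron tree:
import unittest
def enforce(context, rule):
    # stand-in for neutron.policy.enforce; here every persona is allowed
    return True
class BaseCheck(unittest.TestCase):
    context = "system_admin"            # overridden by each persona subclass
    def test_rule(self):
        # the same assertion is re-run once per persona, with its own context
        self.assertTrue(enforce(self.context, "get_service_provider"))
class ProjectReaderCheck(BaseCheck):
    context = "project_reader"          # only the request context changes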
058b5f755ed3b9bbe5ee9fe863707ab5d9def5db
|
transfers/pre-transfer/00_file_to_folder.py
|
transfers/pre-transfer/00_file_to_folder.py
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import datetime
import os
import sys
def main(transfer_path):
"""
If transfer_path is a file, move into a directory of the same name.
"""
if os.path.isdir(transfer_path):
return 1
# Move file into temp dir
transfer_dir, transfer_name = os.path.split(transfer_path)
temp_dir = os.path.join(transfer_dir, 'temp-' + str(datetime.datetime.utcnow()))
os.mkdir(temp_dir)
os.rename(transfer_path, os.path.join(temp_dir, transfer_name))
# Rename temp dir to the same as the file
os.rename(temp_dir, transfer_path)
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
|
Automate Transfers: Sample pre-transfer to move files to folders
|
Automate Transfers: Sample pre-transfer to move files to folders
While a couple transfer types can accept files as well as folders, other
pre-transfer scripts (eg adding a default processing config) require a
folder.
|
Python
|
agpl-3.0
|
artefactual/automation-tools,artefactual/automation-tools
|
Automate Transfers: Sample pre-transfer to move files to folders
While a couple transfer types can accept files as well as folders, other
pre-transfer scripts (eg adding a default processing config) require a
folder.
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import datetime
import os
import sys
def main(transfer_path):
"""
If transfer_path is a file, move into a directory of the same name.
"""
if os.path.isdir(transfer_path):
return 1
# Move file into temp dir
transfer_dir, transfer_name = os.path.split(transfer_path)
temp_dir = os.path.join(transfer_dir, 'temp-' + str(datetime.datetime.utcnow()))
os.mkdir(temp_dir)
os.rename(transfer_path, os.path.join(temp_dir, transfer_name))
# Rename temp dir to the same as the file
os.rename(temp_dir, transfer_path)
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
|
<commit_before><commit_msg>Automate Transfers: Sample pre-transfer to move files to folders
While a couple transfer types can accept files as well as folders, other
pre-transfer scripts (eg adding a default processing config) require a
folder.<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import datetime
import os
import sys
def main(transfer_path):
"""
If transfer_path is a file, move into a directory of the same name.
"""
if os.path.isdir(transfer_path):
return 1
# Move file into temp dir
transfer_dir, transfer_name = os.path.split(transfer_path)
temp_dir = os.path.join(transfer_dir, 'temp-' + str(datetime.datetime.utcnow()))
os.mkdir(temp_dir)
os.rename(transfer_path, os.path.join(temp_dir, transfer_name))
# Rename temp dir to the same as the file
os.rename(temp_dir, transfer_path)
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
|
Automate Transfers: Sample pre-transfer to move files to folders
While a couple transfer types can accept files as well as folders, other
pre-transfer scripts (eg adding a default processing config) require a
folder.#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import datetime
import os
import sys
def main(transfer_path):
"""
If transfer_path is a file, move into a directory of the same name.
"""
if os.path.isdir(transfer_path):
return 1
# Move file into temp dir
transfer_dir, transfer_name = os.path.split(transfer_path)
temp_dir = os.path.join(transfer_dir, 'temp-' + str(datetime.datetime.utcnow()))
os.mkdir(temp_dir)
os.rename(transfer_path, os.path.join(temp_dir, transfer_name))
# Rename temp dir to the same as the file
os.rename(temp_dir, transfer_path)
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
|
<commit_before><commit_msg>Automate Transfers: Sample pre-transfer to move files to folders
While a couple transfer types can accept files as well as folders, other
pre-transfer scripts (eg adding a default processing config) require a
folder.<commit_after>#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import datetime
import os
import sys
def main(transfer_path):
"""
If transfer_path is a file, move into a directory of the same name.
"""
if os.path.isdir(transfer_path):
return 1
# Move file into temp dir
transfer_dir, transfer_name = os.path.split(transfer_path)
temp_dir = os.path.join(transfer_dir, 'temp-' + str(datetime.datetime.utcnow()))
os.mkdir(temp_dir)
os.rename(transfer_path, os.path.join(temp_dir, transfer_name))
# Rename temp dir to the same as the file
os.rename(temp_dir, transfer_path)
if __name__ == '__main__':
transfer_path = sys.argv[1]
main(transfer_path)
|
|
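A self-contained demonstration of the same move-file-into-folder dance as the script above, run against a temporary directory with made-up names:
import os
import tempfile
root = tempfile.mkdtemp()
path = os.path.join(root, "report.pdf")
open(path, "w").close()                            # a bare file arrives as a "transfer"
tmp = os.path.join(root, "tmp-holding")
os.mkdir(tmp)
os.rename(path, os.path.join(tmp, "report.pdf"))   # move the file into a temp dir
os.rename(tmp, path)                               # the temp dir takes the file's name
assert os.path.isdir(path)                         # transfer path is now a folder
assert os.path.isfile(os.path.join(path, "report.pdf"))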
ba277017118f440d59a9f053acdb1d9410c173fb
|
pygraphc/clustering/ClusterDistance.py
|
pygraphc/clustering/ClusterDistance.py
|
class ClusterDistance(object):
@staticmethod
def get_cosine_similarity(tfidf1, tfidf2, length1, length2):
vector_products = 0
for ti1 in tfidf1:
for ti2 in tfidf2:
if ti1[0] == ti2[0]:
vector_products += ti1[1] * ti2[1]
try:
cosine_similarity = vector_products / (length1 * length2)
except ZeroDivisionError:
cosine_similarity = 0
return round(cosine_similarity, 3)
|
Add static method for distance similarity
|
Add static method for distance similarity
|
Python
|
mit
|
studiawan/pygraphc
|
Add static method for distance similarity
|
class ClusterDistance(object):
@staticmethod
def get_cosine_similarity(tfidf1, tfidf2, length1, length2):
vector_products = 0
for ti1 in tfidf1:
for ti2 in tfidf2:
if ti1[0] == ti2[0]:
vector_products += ti1[1] * ti2[1]
try:
cosine_similarity = vector_products / (length1 * length2)
except ZeroDivisionError:
cosine_similarity = 0
return round(cosine_similarity, 3)
|
<commit_before><commit_msg>Add static method for distance similarity<commit_after>
|
class ClusterDistance(object):
@staticmethod
def get_cosine_similarity(tfidf1, tfidf2, length1, length2):
vector_products = 0
for ti1 in tfidf1:
for ti2 in tfidf2:
if ti1[0] == ti2[0]:
vector_products += ti1[1] * ti2[1]
try:
cosine_similarity = vector_products / (length1 * length2)
except ZeroDivisionError:
cosine_similarity = 0
return round(cosine_similarity, 3)
|
Add static method for distance similarity
class ClusterDistance(object):
@staticmethod
def get_cosine_similarity(tfidf1, tfidf2, length1, length2):
vector_products = 0
for ti1 in tfidf1:
for ti2 in tfidf2:
if ti1[0] == ti2[0]:
vector_products += ti1[1] * ti2[1]
try:
cosine_similarity = vector_products / (length1 * length2)
except ZeroDivisionError:
cosine_similarity = 0
return round(cosine_similarity, 3)
|
<commit_before><commit_msg>Add static method for distance similarity<commit_after>
class ClusterDistance(object):
@staticmethod
def get_cosine_similarity(tfidf1, tfidf2, length1, length2):
vector_products = 0
for ti1 in tfidf1:
for ti2 in tfidf2:
if ti1[0] == ti2[0]:
vector_products += ti1[1] * ti2[1]
try:
cosine_similarity = vector_products / (length1 * length2)
except ZeroDivisionError:
cosine_similarity = 0
return round(cosine_similarity, 3)
|
|
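A small worked call for the method above, assuming the package is importable as laid out in the file path; the tf-idf pairs and lengths are invented numbers chosen only to show the expected input shape of (term, weight) tuples plus precomputed vector lengths:
from pygraphc.clustering.ClusterDistance import ClusterDistance
tfidf1 = [("error", 0.6), ("disk", 0.8)]           # (term, weight) pairs
tfidf2 = [("disk", 0.5), ("full", 0.4)]
length1 = (0.6 ** 2 + 0.8 ** 2) ** 0.5             # Euclidean length = 1.0
length2 = (0.5 ** 2 + 0.4 ** 2) ** 0.5             # about 0.640
# only the shared term "disk" contributes: 0.8 * 0.5 = 0.4, so 0.4 / 0.640 rounds to 0.625
print(ClusterDistance.get_cosine_similarity(tfidf1, tfidf2, length1, length2))  # 0.625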
86dade7f56370d5c15176abb1b1b4277ae9226ee
|
python_scripts/solr_query_immigrant.py
|
python_scripts/solr_query_immigrant.py
|
#!/usr/bin/python
import ipdb
#import time
#import csv
import sys
import pysolr
import dateutil.parser
def get_word_counts( solr, query, date_str, count=1000 ) :
date = dateutil.parser.parse( date_str )
documents = []
date_str = date.isoformat() + 'Z'
date_query = "publish_date:[{0} TO {0}+7DAYS]".format(date_str)
sys.stderr.write( ' starting fetch for ' + query )
sys.stderr.write( "\n");
facet_field = "includes"
results = solr.search( query, **{
'facet':"true",
"facet.limit":count,
"facet.field": facet_field,
"facet.method":"enum",
"fq": date_query,
})
facets = results.facets['facet_fields']['includes']
counts = dict(zip(facets[0::2],facets[1::2]))
return counts
def counts_to_db_style( counts ) :
ret = []
total_words = sum( counts.values() )
stem_count_factor = 1
for word,count in counts.iteritems() :
ret.append( { 'term': word,
'stem_count': float(count)/float(total_words),
'raw_stem_count': count,
'total_words': total_words,
'stem_count_factor': stem_count_factor,
}
)
return ret
def solr_connection() :
return pysolr.Solr('http://localhost:8983/solr/')
solr = solr_connection()
join_query = '{!join from=media_id_inner to=media_id}media_sets_id:1'
ipdb.set_trace()
common_fq_params = ['field_type:st', 'publish_date:[2012-01-01T00:00:00Z TO NOW]']
query_specific_fq_params = [ 'title:immigrants' ]
fq_params = common_fq_params + query_specific_fq_params
result = solr.search( join_query, **{
'facet':"true",
'facet.range.start':'2012-01-01T00:00:00Z',
'facet.range':'publish_date',
'facet.range.end':['NOW','NOW'],
'facet.range.gap':'+1MONTH',
'fq': fq_params,
})
facet_counts = result.facets['facet_ranges']['publish_date']['counts']
print result
#results = get_word_counts(solr_connection(), 'sentence:the', '2013-08-10', count=100);
#print results
#print results.facets['facet_fields']['includes']
|
Add script to measure usage of immigrant / illegal immigrant
|
Add script to measure usage of immigrant / illegal immigrant
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud
|
Add script to measure usage of immigrant / illegal immigrant
|
#!/usr/bin/python
import ipdb
#import time
#import csv
import sys
import pysolr
import dateutil.parser
def get_word_counts( solr, query, date_str, count=1000 ) :
date = dateutil.parser.parse( date_str )
documents = []
date_str = date.isoformat() + 'Z'
date_query = "publish_date:[{0} TO {0}+7DAYS]".format(date_str)
sys.stderr.write( ' starting fetch for ' + query )
sys.stderr.write( "\n");
facet_field = "includes"
results = solr.search( query, **{
'facet':"true",
"facet.limit":count,
"facet.field": facet_field,
"facet.method":"enum",
"fq": date_query,
})
facets = results.facets['facet_fields']['includes']
counts = dict(zip(facets[0::2],facets[1::2]))
return counts
def counts_to_db_style( counts ) :
ret = []
total_words = sum( counts.values() )
stem_count_factor = 1
for word,count in counts.iteritems() :
ret.append( { 'term': word,
'stem_count': float(count)/float(total_words),
'raw_stem_count': count,
'total_words': total_words,
'stem_count_factor': stem_count_factor,
}
)
return ret
def solr_connection() :
return pysolr.Solr('http://localhost:8983/solr/')
solr = solr_connection()
join_query = '{!join from=media_id_inner to=media_id}media_sets_id:1'
ipdb.set_trace()
common_fq_params = ['field_type:st', 'publish_date:[2012-01-01T00:00:00Z TO NOW]']
query_specific_fq_params = [ 'title:immigrants' ]
fq_params = common_fq_params + query_specific_fq_params
result = solr.search( join_query, **{
'facet':"true",
'facet.range.start':'2012-01-01T00:00:00Z',
'facet.range':'publish_date',
'facet.range.end':['NOW','NOW'],
'facet.range.gap':'+1MONTH',
'fq': fq_params,
})
facet_counts = result.facets['facet_ranges']['publish_date']['counts']
print result
#results = get_word_counts(solr_connection(), 'sentence:the', '2013-08-10', count=100);
#print results
#print results.facets['facet_fields']['includes']
|
<commit_before><commit_msg>Add script to measure usage of immigrant / illegal immigrant<commit_after>
|
#!/usr/bin/python
import ipdb
#import time
#import csv
import sys
import pysolr
import dateutil.parser
def get_word_counts( solr, query, date_str, count=1000 ) :
date = dateutil.parser.parse( date_str )
documents = []
date_str = date.isoformat() + 'Z'
date_query = "publish_date:[{0} TO {0}+7DAYS]".format(date_str)
sys.stderr.write( ' starting fetch for ' + query )
sys.stderr.write( "\n");
facet_field = "includes"
results = solr.search( query, **{
'facet':"true",
"facet.limit":count,
"facet.field": facet_field,
"facet.method":"enum",
"fq": date_query,
})
facets = results.facets['facet_fields']['includes']
counts = dict(zip(facets[0::2],facets[1::2]))
return counts
def counts_to_db_style( counts ) :
ret = []
total_words = sum( counts.values() )
stem_count_factor = 1
for word,count in counts.iteritems() :
ret.append( { 'term': word,
'stem_count': float(count)/float(total_words),
'raw_stem_count': count,
'total_words': total_words,
'stem_count_factor': stem_count_factor,
}
)
return ret
def solr_connection() :
return pysolr.Solr('http://localhost:8983/solr/')
solr = solr_connection()
join_query = '{!join from=media_id_inner to=media_id}media_sets_id:1'
ipdb.set_trace()
common_fq_params = ['field_type:st', 'publish_date:[2012-01-01T00:00:00Z TO NOW]']
query_specific_fq_params = [ 'title:immigrants' ]
fq_params = common_fq_params + query_specific_fq_params
result = solr.search( join_query, **{
'facet':"true",
'facet.range.start':'2012-01-01T00:00:00Z',
'facet.range':'publish_date',
'facet.range.end':['NOW','NOW'],
'facet.range.gap':'+1MONTH',
'fq': fq_params,
})
facet_counts = result.facets['facet_ranges']['publish_date']['counts']
print result
#results = get_word_counts(solr_connection(), 'sentence:the', '2013-08-10', count=100);
#print results
#print results.facets['facet_fields']['includes']
|
Add script to measure usage of immigrant / illegal immigrant#!/usr/bin/python
import ipdb
#import time
#import csv
import sys
import pysolr
import dateutil.parser
def get_word_counts( solr, query, date_str, count=1000 ) :
date = dateutil.parser.parse( date_str )
documents = []
date_str = date.isoformat() + 'Z'
date_query = "publish_date:[{0} TO {0}+7DAYS]".format(date_str)
sys.stderr.write( ' starting fetch for ' + query )
sys.stderr.write( "\n");
facet_field = "includes"
results = solr.search( query, **{
'facet':"true",
"facet.limit":count,
"facet.field": facet_field,
"facet.method":"enum",
"fq": date_query,
})
facets = results.facets['facet_fields']['includes']
counts = dict(zip(facets[0::2],facets[1::2]))
return counts
def counts_to_db_style( counts ) :
ret = []
total_words = sum( counts.values() )
stem_count_factor = 1
for word,count in counts.iteritems() :
ret.append( { 'term': word,
'stem_count': float(count)/float(total_words),
'raw_stem_count': count,
'total_words': total_words,
'stem_count_factor': stem_count_factor,
}
)
return ret
def solr_connection() :
return pysolr.Solr('http://localhost:8983/solr/')
solr = solr_connection()
join_query = '{!join from=media_id_inner to=media_id}media_sets_id:1'
ipdb.set_trace()
common_fq_params = ['field_type:st', 'publish_date:[2012-01-01T00:00:00Z TO NOW]']
query_specific_fq_params = [ 'title:immigrants' ]
fq_params = common_fq_params + query_specific_fq_params
result = solr.search( join_query, **{
'facet':"true",
'facet.range.start':'2012-01-01T00:00:00Z',
'facet.range':'publish_date',
'facet.range.end':['NOW','NOW'],
'facet.range.gap':'+1MONTH',
'fq': fq_params,
})
facet_counts = result.facets['facet_ranges']['publish_date']['counts']
print result
#results = get_word_counts(solr_connection(), 'sentence:the', '2013-08-10', count=100);
#print results
#print results.facets['facet_fields']['includes']
|
<commit_before><commit_msg>Add script to measure usage of immigrant / illegal immigrant<commit_after>#!/usr/bin/python
import ipdb
#import time
#import csv
import sys
import pysolr
import dateutil.parser
def get_word_counts( solr, query, date_str, count=1000 ) :
date = dateutil.parser.parse( date_str )
documents = []
date_str = date.isoformat() + 'Z'
date_query = "publish_date:[{0} TO {0}+7DAYS]".format(date_str)
sys.stderr.write( ' starting fetch for ' + query )
sys.stderr.write( "\n");
facet_field = "includes"
results = solr.search( query, **{
'facet':"true",
"facet.limit":count,
"facet.field": facet_field,
"facet.method":"enum",
"fq": date_query,
})
facets = results.facets['facet_fields']['includes']
counts = dict(zip(facets[0::2],facets[1::2]))
return counts
def counts_to_db_style( counts ) :
ret = []
total_words = sum( counts.values() )
stem_count_factor = 1
for word,count in counts.iteritems() :
ret.append( { 'term': word,
'stem_count': float(count)/float(total_words),
'raw_stem_count': count,
'total_words': total_words,
'stem_count_factor': stem_count_factor,
}
)
return ret
def solr_connection() :
return pysolr.Solr('http://localhost:8983/solr/')
solr = solr_connection()
join_query = '{!join from=media_id_inner to=media_id}media_sets_id:1'
ipdb.set_trace()
common_fq_params = ['field_type:st', 'publish_date:[2012-01-01T00:00:00Z TO NOW]']
query_specific_fq_params = [ 'title:immigrants' ]
fq_params = common_fq_params + query_specific_fq_params
result = solr.search( join_query, **{
'facet':"true",
'facet.range.start':'2012-01-01T00:00:00Z',
'facet.range':'publish_date',
'facet.range.end':['NOW','NOW'],
'facet.range.gap':'+1MONTH',
'fq': fq_params,
})
facet_counts = result.facets['facet_ranges']['publish_date']['counts']
print result
#results = get_word_counts(solr_connection(), 'sentence:the', '2013-08-10', count=100);
#print results
#print results.facets['facet_fields']['includes']
|
|
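The zip slicing in get_word_counts above converts Solr's flat facet response of alternating terms and counts into a dict; a tiny standalone illustration with invented values:
facets = ["immigrant", 42, "border", 17, "visa", 9]    # Solr-style flat facet list
counts = dict(zip(facets[0::2], facets[1::2]))         # pair every term with its count
print(counts)                                          # {'immigrant': 42, 'border': 17, 'visa': 9}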
5646a1e060cae4016fd3766fa5e9ad5486109e8e
|
tests/validation/cattlevalidationtest/core/test_host_api.py
|
tests/validation/cattlevalidationtest/core/test_host_api.py
|
from common_fixtures import * # NOQA
import websocket as ws
import pytest
def test_host_api_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# valid token and a url to the websocket
stats = hosts[0].stats()
conn = ws.create_connection(stats.url + '?token='+stats.token)
result = conn.recv()
assert result is not None
assert result.startswith('{')
def test_host_api_no_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# Pass No token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url)
assert 'Handshake status 401' in str(excinfo.value)
def test_host_api_garbage_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# pass garbage token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url+'?token=abcd')
assert 'Handshake status 401' in str(excinfo.value)
|
Add test for validation of JWT in host api
|
Add test for validation of JWT in host api
|
Python
|
apache-2.0
|
aruneli/validation-tests,sangeethah/validation-tests,cjellick/validation-tests,sonchang/validation-tests,hibooboo2/validation-tests,rancher/validation-tests,rancherio/validation-tests,sangeethah/validation-tests,cjellick/validation-tests,rancher/validation-tests,hibooboo2/validation-tests,rancherio/validation-tests,wlan0/validation-tests,aruneli/validation-tests,wlan0/validation-tests,sonchang/validation-tests
|
Add test for validation of JWT in host api
|
from common_fixtures import * # NOQA
import websocket as ws
import pytest
def test_host_api_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# valid token and a url to the websocket
stats = hosts[0].stats()
conn = ws.create_connection(stats.url + '?token='+stats.token)
result = conn.recv()
assert result is not None
assert result.startswith('{')
def test_host_api_no_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# Pass No token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url)
assert 'Handshake status 401' in str(excinfo.value)
def test_host_api_garbage_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# pass garbage token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url+'?token=abcd')
assert 'Handshake status 401' in str(excinfo.value)
|
<commit_before><commit_msg>Add test for validation of JWT in host api<commit_after>
|
from common_fixtures import * # NOQA
import websocket as ws
import pytest
def test_host_api_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# valid token and a url to the websocket
stats = hosts[0].stats()
conn = ws.create_connection(stats.url + '?token='+stats.token)
result = conn.recv()
assert result is not None
assert result.startswith('{')
def test_host_api_no_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# Pass No token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url)
assert 'Handshake status 401' in str(excinfo.value)
def test_host_api_garbage_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# pass garbage token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url+'?token=abcd')
assert 'Handshake status 401' in str(excinfo.value)
|
Add test for validation of JWT in host apifrom common_fixtures import * # NOQA
import websocket as ws
import pytest
def test_host_api_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# valid token and a url to the websocket
stats = hosts[0].stats()
conn = ws.create_connection(stats.url + '?token='+stats.token)
result = conn.recv()
assert result is not None
assert result.startswith('{')
def test_host_api_no_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# Pass No token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url)
assert 'Handshake status 401' in str(excinfo.value)
def test_host_api_garbage_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# pass garbage token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url+'?token=abcd')
assert 'Handshake status 401' in str(excinfo.value)
|
<commit_before><commit_msg>Add test for validation of JWT in host api<commit_after>from common_fixtures import * # NOQA
import websocket as ws
import pytest
def test_host_api_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# valid token and a url to the websocket
stats = hosts[0].stats()
conn = ws.create_connection(stats.url + '?token='+stats.token)
result = conn.recv()
assert result is not None
assert result.startswith('{')
def test_host_api_no_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# Pass No token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url)
assert 'Handshake status 401' in str(excinfo.value)
def test_host_api_garbage_token(admin_client):
hosts = admin_client.list_host(kind='docker', removed_null=True)
assert len(hosts) > 0
# pass garbage token
stats = hosts[0].stats()
with pytest.raises(Exception) as excinfo:
ws.create_connection(stats.url+'?token=abcd')
assert 'Handshake status 401' in str(excinfo.value)
|
|
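The tests in the record above hinge on passing the short-lived JWT as a query parameter on the stats websocket URL; a minimal sketch with a placeholder host and token, using the same websocket-client calls as the record:
import websocket as ws
stats_url = "ws://<rancher-host>/v1/hosts/1h1/stats"   # placeholder stats URL
token = "<jwt-from-the-stats-action>"                   # placeholder token
conn = ws.create_connection(stats_url + "?token=" + token)  # valid JWT lets the handshake complete
print(conn.recv())                                          # first JSON stats frame from the agent
conn.close()
# Omitting or mangling the token makes the handshake fail with HTTP 401,
# which is what the two negative tests above assert on.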
21183a9b42be47aaa81b7132a960aea62f218f15
|
src/tests/ggrc/models/test_control_assessment.py
|
src/tests/ggrc/models/test_control_assessment.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc import db
from ggrc.models import ControlAssessment
from tests.ggrc import TestCase
from tests.ggrc.models.factories import ControlAssessmentFactory
class TestControlAssessment(TestCase):
def test_auto_slug_generation(self):
ControlAssessmentFactory(title="Some title")
db.session.commit()
ca = ControlAssessment.query.first()
self.assertIn("CONTROL-", ca.slug)
self.assertIn(ca.control.slug, ca.slug)
|
Add tests for CA slug generation
|
Add tests for CA slug generation
Test that control assessment slugs are prefixed with control slug.
|
Python
|
apache-2.0
|
VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core
|
Add tests for CA slug generation
Test that control assessment slugs are prefixed with control slug.
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc import db
from ggrc.models import ControlAssessment
from tests.ggrc import TestCase
from tests.ggrc.models.factories import ControlAssessmentFactory
class TestControlAssessment(TestCase):
def test_auto_slug_generation(self):
ControlAssessmentFactory(title="Some title")
db.session.commit()
ca = ControlAssessment.query.first()
self.assertIn("CONTROL-", ca.slug)
self.assertIn(ca.control.slug, ca.slug)
|
<commit_before><commit_msg>Add tests for CA slug generation
Test that control assessment slugs are prefixed with control slug.<commit_after>
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc import db
from ggrc.models import ControlAssessment
from tests.ggrc import TestCase
from tests.ggrc.models.factories import ControlAssessmentFactory
class TestControlAssessment(TestCase):
def test_auto_slug_generation(self):
ControlAssessmentFactory(title="Some title")
db.session.commit()
ca = ControlAssessment.query.first()
self.assertIn("CONTROL-", ca.slug)
self.assertIn(ca.control.slug, ca.slug)
|
Add tests for CA slug generation
Test that control assessment slugs are prefixed with control slug.# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc import db
from ggrc.models import ControlAssessment
from tests.ggrc import TestCase
from tests.ggrc.models.factories import ControlAssessmentFactory
class TestControlAssessment(TestCase):
def test_auto_slug_generation(self):
ControlAssessmentFactory(title="Some title")
db.session.commit()
ca = ControlAssessment.query.first()
self.assertIn("CONTROL-", ca.slug)
self.assertIn(ca.control.slug, ca.slug)
|
<commit_before><commit_msg>Add tests for CA slug generation
Test that control assessment slugs are prefixed with control slug.<commit_after># Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc import db
from ggrc.models import ControlAssessment
from tests.ggrc import TestCase
from tests.ggrc.models.factories import ControlAssessmentFactory
class TestControlAssessment(TestCase):
def test_auto_slug_generation(self):
ControlAssessmentFactory(title="Some title")
db.session.commit()
ca = ControlAssessment.query.first()
self.assertIn("CONTROL-", ca.slug)
self.assertIn(ca.control.slug, ca.slug)
|
|
cb7dd3e7bf8fdc195fa0b37479efa61e0091fbec
|
migrations/versions/201702221443_fb7b6aa148e_delete_legacy_paper_tables.py
|
migrations/versions/201702221443_fb7b6aa148e_delete_legacy_paper_tables.py
|
"""Delete legacy paper tables
Revision ID: fb7b6aa148e
Revises: 25d478c9d690
Create Date: 2017-02-22 14:43:34.952274
"""
import sqlalchemy as sa
from alembic import context, op
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.util.struct.enum import RichIntEnum
# revision identifiers, used by Alembic.
revision = 'fb7b6aa148e'
down_revision = '25d478c9d690'
class PaperReviewingRoleType(RichIntEnum):
reviewer = 0
referee = 1
editor = 2
def upgrade():
if not context.is_offline_mode():
# sanity check to avoid running w/o papers migrated
conn = op.get_bind()
has_new_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.legacy_paper_files)").scalar()
has_old_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.revisions)").scalar()
if has_new_papers != has_old_papers:
raise Exception('Upgrade to {} and run the event_papers zodb import first!'.format(down_revision))
op.drop_table('legacy_paper_files', schema='event_paper_reviewing')
op.drop_table('legacy_contribution_roles', schema='event_paper_reviewing')
def downgrade():
op.create_table(
'contribution_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False, index=True),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('role', PyIntEnum(PaperReviewingRoleType), nullable=False, index=True),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
op.create_table(
'paper_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('revision_id', sa.Integer(), nullable=True),
sa.Column('storage_backend', sa.String(), nullable=False),
sa.Column('content_type', sa.String(), nullable=False),
sa.Column('size', sa.BigInteger(), nullable=False),
sa.Column('storage_file_id', sa.String(), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
|
Add alembic revision to delete legacy paper tables
|
Add alembic revision to delete legacy paper tables
|
Python
|
mit
|
pferreir/indico,ThiefMaster/indico,DirkHoffmann/indico,indico/indico,indico/indico,OmeGak/indico,mic4ael/indico,pferreir/indico,mic4ael/indico,OmeGak/indico,ThiefMaster/indico,mvidalgarcia/indico,ThiefMaster/indico,mic4ael/indico,indico/indico,DirkHoffmann/indico,OmeGak/indico,ThiefMaster/indico,DirkHoffmann/indico,mvidalgarcia/indico,indico/indico,mic4ael/indico,pferreir/indico,pferreir/indico,mvidalgarcia/indico,mvidalgarcia/indico,OmeGak/indico,DirkHoffmann/indico
|
Add alembic revision to delete legacy paper tables
|
"""Delete legacy paper tables
Revision ID: fb7b6aa148e
Revises: 25d478c9d690
Create Date: 2017-02-22 14:43:34.952274
"""
import sqlalchemy as sa
from alembic import context, op
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.util.struct.enum import RichIntEnum
# revision identifiers, used by Alembic.
revision = 'fb7b6aa148e'
down_revision = '25d478c9d690'
class PaperReviewingRoleType(RichIntEnum):
reviewer = 0
referee = 1
editor = 2
def upgrade():
if not context.is_offline_mode():
# sanity check to avoid running w/o papers migrated
conn = op.get_bind()
has_new_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.legacy_paper_files)").scalar()
has_old_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.revisions)").scalar()
if has_new_papers != has_old_papers:
raise Exception('Upgrade to {} and run the event_papers zodb import first!'.format(down_revision))
op.drop_table('legacy_paper_files', schema='event_paper_reviewing')
op.drop_table('legacy_contribution_roles', schema='event_paper_reviewing')
def downgrade():
op.create_table(
'contribution_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False, index=True),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('role', PyIntEnum(PaperReviewingRoleType), nullable=False, index=True),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
op.create_table(
'paper_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('revision_id', sa.Integer(), nullable=True),
sa.Column('storage_backend', sa.String(), nullable=False),
sa.Column('content_type', sa.String(), nullable=False),
sa.Column('size', sa.BigInteger(), nullable=False),
sa.Column('storage_file_id', sa.String(), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
|
<commit_before><commit_msg>Add alembic revision to delete legacy paper tables<commit_after>
|
"""Delete legacy paper tables
Revision ID: fb7b6aa148e
Revises: 25d478c9d690
Create Date: 2017-02-22 14:43:34.952274
"""
import sqlalchemy as sa
from alembic import context, op
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.util.struct.enum import RichIntEnum
# revision identifiers, used by Alembic.
revision = 'fb7b6aa148e'
down_revision = '25d478c9d690'
class PaperReviewingRoleType(RichIntEnum):
reviewer = 0
referee = 1
editor = 2
def upgrade():
if not context.is_offline_mode():
# sanity check to avoid running w/o papers migrated
conn = op.get_bind()
has_new_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.legacy_paper_files)").scalar()
has_old_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.revisions)").scalar()
if has_new_papers != has_old_papers:
raise Exception('Upgrade to {} and run the event_papers zodb import first!'.format(down_revision))
op.drop_table('legacy_paper_files', schema='event_paper_reviewing')
op.drop_table('legacy_contribution_roles', schema='event_paper_reviewing')
def downgrade():
op.create_table(
'contribution_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False, index=True),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('role', PyIntEnum(PaperReviewingRoleType), nullable=False, index=True),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
op.create_table(
'paper_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('revision_id', sa.Integer(), nullable=True),
sa.Column('storage_backend', sa.String(), nullable=False),
sa.Column('content_type', sa.String(), nullable=False),
sa.Column('size', sa.BigInteger(), nullable=False),
sa.Column('storage_file_id', sa.String(), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
|
Add alembic revision to delete legacy paper tables"""Delete legacy paper tables
Revision ID: fb7b6aa148e
Revises: 25d478c9d690
Create Date: 2017-02-22 14:43:34.952274
"""
import sqlalchemy as sa
from alembic import context, op
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.util.struct.enum import RichIntEnum
# revision identifiers, used by Alembic.
revision = 'fb7b6aa148e'
down_revision = '25d478c9d690'
class PaperReviewingRoleType(RichIntEnum):
reviewer = 0
referee = 1
editor = 2
def upgrade():
if not context.is_offline_mode():
# sanity check to avoid running w/o papers migrated
conn = op.get_bind()
has_new_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.legacy_paper_files)").scalar()
has_old_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.revisions)").scalar()
if has_new_papers != has_old_papers:
raise Exception('Upgrade to {} and run the event_papers zodb import first!'.format(down_revision))
op.drop_table('legacy_paper_files', schema='event_paper_reviewing')
op.drop_table('legacy_contribution_roles', schema='event_paper_reviewing')
def downgrade():
op.create_table(
'contribution_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False, index=True),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('role', PyIntEnum(PaperReviewingRoleType), nullable=False, index=True),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
op.create_table(
'paper_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('revision_id', sa.Integer(), nullable=True),
sa.Column('storage_backend', sa.String(), nullable=False),
sa.Column('content_type', sa.String(), nullable=False),
sa.Column('size', sa.BigInteger(), nullable=False),
sa.Column('storage_file_id', sa.String(), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
|
<commit_before><commit_msg>Add alembic revision to delete legacy paper tables<commit_after>"""Delete legacy paper tables
Revision ID: fb7b6aa148e
Revises: 25d478c9d690
Create Date: 2017-02-22 14:43:34.952274
"""
import sqlalchemy as sa
from alembic import context, op
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.util.struct.enum import RichIntEnum
# revision identifiers, used by Alembic.
revision = 'fb7b6aa148e'
down_revision = '25d478c9d690'
class PaperReviewingRoleType(RichIntEnum):
reviewer = 0
referee = 1
editor = 2
def upgrade():
if not context.is_offline_mode():
# sanity check to avoid running w/o papers migrated
conn = op.get_bind()
has_new_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.legacy_paper_files)").scalar()
has_old_papers = conn.execute("SELECT EXISTS (SELECT 1 FROM event_paper_reviewing.revisions)").scalar()
if has_new_papers != has_old_papers:
raise Exception('Upgrade to {} and run the event_papers zodb import first!'.format(down_revision))
op.drop_table('legacy_paper_files', schema='event_paper_reviewing')
op.drop_table('legacy_contribution_roles', schema='event_paper_reviewing')
def downgrade():
op.create_table(
'contribution_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False, index=True),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('role', PyIntEnum(PaperReviewingRoleType), nullable=False, index=True),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
op.create_table(
'paper_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('contribution_id', sa.Integer(), nullable=False, index=True),
sa.Column('revision_id', sa.Integer(), nullable=True),
sa.Column('storage_backend', sa.String(), nullable=False),
sa.Column('content_type', sa.String(), nullable=False),
sa.Column('size', sa.BigInteger(), nullable=False),
sa.Column('storage_file_id', sa.String(), nullable=False),
sa.Column('filename', sa.String(), nullable=False),
sa.Column('created_dt', UTCDateTime, nullable=False),
sa.ForeignKeyConstraint(['contribution_id'], ['events.contributions.id']),
sa.PrimaryKeyConstraint('id'),
schema='event_paper_reviewing'
)
|
|
7679915999905528cca912e7d824b1dc4f548d9e
|
tensorflow_model_analysis/notebook/colab/util_test.py
|
tensorflow_model_analysis/notebook/colab/util_test.py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import tensorflow as tf
from tensorflow_model_analysis.notebook.colab import util
class UtilTest(tf.test.TestCase):
def testGenerateSimpleHtmlForTfmaComponent(self):
tfma_component_name = 'tfma-nb-slicing-metrics'
html_code = util.generate_html_for_tfma_component(tfma_component_name, None,
None, '')
# Count opening and closing tag
self.assertEqual(html_code.count(tfma_component_name), 2)
def testGenerateForNotTrustedTfmaComponent(self):
tfma_component_name = 'my-component'
with self.assertRaises(ValueError):
util.generate_html_for_tfma_component(tfma_component_name, None, None, '')
if __name__ == '__main__':
tf.test.main()
|
Add unit test for generate_html_for_tfma_component.
|
Add unit test for generate_html_for_tfma_component.
PiperOrigin-RevId: 465824929
|
Python
|
apache-2.0
|
tensorflow/model-analysis,tensorflow/model-analysis,tensorflow/model-analysis,tensorflow/model-analysis
|
Add unit test for generate_html_for_tfma_component.
PiperOrigin-RevId: 465824929
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import tensorflow as tf
from tensorflow_model_analysis.notebook.colab import util
class UtilTest(tf.test.TestCase):
def testGenerateSimpleHtmlForTfmaComponent(self):
tfma_component_name = 'tfma-nb-slicing-metrics'
html_code = util.generate_html_for_tfma_component(tfma_component_name, None,
None, '')
# Count opening and closing tag
self.assertEqual(html_code.count(tfma_component_name), 2)
def testGenerateForNotTrustedTfmaComponent(self):
tfma_component_name = 'my-component'
with self.assertRaises(ValueError):
util.generate_html_for_tfma_component(tfma_component_name, None, None, '')
if __name__ == '__main__':
tf.test.main()
|
<commit_before><commit_msg>Add unit test for generate_html_for_tfma_component.
PiperOrigin-RevId: 465824929<commit_after>
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import tensorflow as tf
from tensorflow_model_analysis.notebook.colab import util
class UtilTest(tf.test.TestCase):
def testGenerateSimpleHtmlForTfmaComponent(self):
tfma_component_name = 'tfma-nb-slicing-metrics'
html_code = util.generate_html_for_tfma_component(tfma_component_name, None,
None, '')
# Count opening and closing tag
self.assertEqual(html_code.count(tfma_component_name), 2)
def testGenerateForNotTrustedTfmaComponent(self):
tfma_component_name = 'my-component'
with self.assertRaises(ValueError):
util.generate_html_for_tfma_component(tfma_component_name, None, None, '')
if __name__ == '__main__':
tf.test.main()
|
Add unit test for generate_html_for_tfma_component.
PiperOrigin-RevId: 465824929# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import tensorflow as tf
from tensorflow_model_analysis.notebook.colab import util
class UtilTest(tf.test.TestCase):
def testGenerateSimpleHtmlForTfmaComponent(self):
tfma_component_name = 'tfma-nb-slicing-metrics'
html_code = util.generate_html_for_tfma_component(tfma_component_name, None,
None, '')
# Count opening and closing tag
self.assertEqual(html_code.count(tfma_component_name), 2)
def testGenerateForNotTrustedTfmaComponent(self):
tfma_component_name = 'my-component'
with self.assertRaises(ValueError):
util.generate_html_for_tfma_component(tfma_component_name, None, None, '')
if __name__ == '__main__':
tf.test.main()
|
<commit_before><commit_msg>Add unit test for generate_html_for_tfma_component.
PiperOrigin-RevId: 465824929<commit_after># Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import tensorflow as tf
from tensorflow_model_analysis.notebook.colab import util
class UtilTest(tf.test.TestCase):
def testGenerateSimpleHtmlForTfmaComponent(self):
tfma_component_name = 'tfma-nb-slicing-metrics'
html_code = util.generate_html_for_tfma_component(tfma_component_name, None,
None, '')
# Count opening and closing tag
self.assertEqual(html_code.count(tfma_component_name), 2)
def testGenerateForNotTrustedTfmaComponent(self):
tfma_component_name = 'my-component'
with self.assertRaises(ValueError):
util.generate_html_for_tfma_component(tfma_component_name, None, None, '')
if __name__ == '__main__':
tf.test.main()
|
|
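The test record above pins down two behaviours of generate_html_for_tfma_component: a trusted component name is wrapped in matching opening and closing tags (so the name appears exactly twice), and any other name raises ValueError. The sketch below is not the TensorFlow Model Analysis implementation; the allow-list contents and the helper name are assumptions, and it only illustrates the shape of logic that would satisfy both test cases.

# Illustrative sketch only; the real util.generate_html_for_tfma_component in
# tensorflow_model_analysis is not reproduced here.
_TRUSTED_TFMA_COMPONENTS = frozenset({
    'tfma-nb-slicing-metrics',   # the name exercised by the test above
    # further trusted component names would be listed here (assumed)
})

def generate_html_sketch(component_name, slicing_spec, data, trusted_html):
    if component_name not in _TRUSTED_TFMA_COMPONENTS:
        raise ValueError('Not a trusted TFMA component: %r' % component_name)
    # One opening and one closing tag, so the component name appears exactly twice.
    return '<{name}>{body}</{name}>'.format(name=component_name,
                                            body=trusted_html or '')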
807e36f4eec5813d8d529de12df275a754b4b077
|
tasks.py
|
tasks.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from invoke import run, task
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__)))
@task
def clean(docs=False, bytecode=False, extra=''):
'''Cleanup all build artifacts'''
patterns = ['build', 'dist', 'cover', 'docs/_build', '**/*.pyc', '*.egg-info']
for pattern in patterns:
print('Removing {0}'.format(pattern))
run('cd {0} && rm -rf {1}'.format(ROOT, pattern))
@task
def demo():
'''Run the demo'''
run('python {0}/examples/todo.py'.format(ROOT))
@task
def test():
'''Run tests suite'''
run('cd {0} && nosetests --rednose --force-color'.format(ROOT), pty=True)
@task
def cover():
'''Run tests suite with coverage'''
run('cd {0} && nosetests --rednose --force-color \
--with-coverage --cover-html --cover-package=flask_restplus'.format(ROOT), pty=True)
@task
def tox():
'''Run tests in all Python versions'''
run('tox', pty=True)
@task
def qa():
'''Run a quality report'''
run('flake8 {0}/flask_restplus'.format(ROOT))
@task
def doc():
'''Build the documentation'''
run('cd {0}/doc && make html'.format(ROOT), pty=True)
@task
def dist():
'''Package for distribution'''
run('cd {0} && python setup.py sdist bdist_wheel'.format(ROOT), pty=True)
@task(tox, doc, qa, dist, default=True)
def all():
pass
|
Use invoke as build tool
|
Use invoke as build tool
|
Python
|
mit
|
luminusnetworks/flask-restplus,marrybird/flask-restplus,marrybird/flask-restplus,leiserfg/flask-restplus,fixedd/flask-restplus,l-vincent-l/flask-restplus,fixedd/flask-restplus,leiserfg/flask-restplus,l-vincent-l/flask-restplus,luminusnetworks/flask-restplus,awiddersheim/flask-restplus,awiddersheim/flask-restplus
|
Use invoke as build tool
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from invoke import run, task
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__)))
@task
def clean(docs=False, bytecode=False, extra=''):
'''Cleanup all build artifacts'''
patterns = ['build', 'dist', 'cover', 'docs/_build', '**/*.pyc', '*.egg-info']
for pattern in patterns:
print('Removing {0}'.format(pattern))
run('cd {0} && rm -rf {1}'.format(ROOT, pattern))
@task
def demo():
'''Run the demo'''
run('python {0}/examples/todo.py'.format(ROOT))
@task
def test():
'''Run tests suite'''
run('cd {0} && nosetests --rednose --force-color'.format(ROOT), pty=True)
@task
def cover():
'''Run tests suite with coverage'''
run('cd {0} && nosetests --rednose --force-color \
--with-coverage --cover-html --cover-package=flask_restplus'.format(ROOT), pty=True)
@task
def tox():
'''Run tests in all Python versions'''
run('tox', pty=True)
@task
def qa():
'''Run a quality report'''
run('flake8 {0}/flask_restplus'.format(ROOT))
@task
def doc():
'''Build the documentation'''
run('cd {0}/doc && make html'.format(ROOT), pty=True)
@task
def dist():
'''Package for distribution'''
run('cd {0} && python setup.py sdist bdist_wheel'.format(ROOT), pty=True)
@task(tox, doc, qa, dist, default=True)
def all():
pass
|
<commit_before><commit_msg>Use invoke as build tool<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from invoke import run, task
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__)))
@task
def clean(docs=False, bytecode=False, extra=''):
'''Cleanup all build artifacts'''
patterns = ['build', 'dist', 'cover', 'docs/_build', '**/*.pyc', '*.egg-info']
for pattern in patterns:
print('Removing {0}'.format(pattern))
run('cd {0} && rm -rf {1}'.format(ROOT, pattern))
@task
def demo():
'''Run the demo'''
run('python {0}/examples/todo.py'.format(ROOT))
@task
def test():
'''Run tests suite'''
run('cd {0} && nosetests --rednose --force-color'.format(ROOT), pty=True)
@task
def cover():
'''Run tests suite with coverage'''
run('cd {0} && nosetests --rednose --force-color \
--with-coverage --cover-html --cover-package=flask_restplus'.format(ROOT), pty=True)
@task
def tox():
'''Run tests in all Python versions'''
run('tox', pty=True)
@task
def qa():
'''Run a quality report'''
run('flake8 {0}/flask_restplus'.format(ROOT))
@task
def doc():
'''Build the documentation'''
run('cd {0}/doc && make html'.format(ROOT), pty=True)
@task
def dist():
'''Package for distribution'''
run('cd {0} && python setup.py sdist bdist_wheel'.format(ROOT), pty=True)
@task(tox, doc, qa, dist, default=True)
def all():
pass
|
Use invoke as build tool# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from invoke import run, task
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__)))
@task
def clean(docs=False, bytecode=False, extra=''):
'''Cleanup all build artifacts'''
patterns = ['build', 'dist', 'cover', 'docs/_build', '**/*.pyc', '*.egg-info']
for pattern in patterns:
print('Removing {0}'.format(pattern))
run('cd {0} && rm -rf {1}'.format(ROOT, pattern))
@task
def demo():
'''Run the demo'''
run('python {0}/examples/todo.py'.format(ROOT))
@task
def test():
'''Run tests suite'''
run('cd {0} && nosetests --rednose --force-color'.format(ROOT), pty=True)
@task
def cover():
'''Run tests suite with coverage'''
run('cd {0} && nosetests --rednose --force-color \
--with-coverage --cover-html --cover-package=flask_restplus'.format(ROOT), pty=True)
@task
def tox():
'''Run tests in all Python versions'''
run('tox', pty=True)
@task
def qa():
'''Run a quality report'''
run('flake8 {0}/flask_restplus'.format(ROOT))
@task
def doc():
'''Build the documentation'''
run('cd {0}/doc && make html'.format(ROOT), pty=True)
@task
def dist():
'''Package for distribution'''
run('cd {0} && python setup.py sdist bdist_wheel'.format(ROOT), pty=True)
@task(tox, doc, qa, dist, default=True)
def all():
pass
|
<commit_before><commit_msg>Use invoke as build tool<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from invoke import run, task
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__)))
@task
def clean(docs=False, bytecode=False, extra=''):
'''Cleanup all build artifacts'''
patterns = ['build', 'dist', 'cover', 'docs/_build', '**/*.pyc', '*.egg-info']
for pattern in patterns:
print('Removing {0}'.format(pattern))
run('cd {0} && rm -rf {1}'.format(ROOT, pattern))
@task
def demo():
'''Run the demo'''
run('python {0}/examples/todo.py'.format(ROOT))
@task
def test():
'''Run tests suite'''
run('cd {0} && nosetests --rednose --force-color'.format(ROOT), pty=True)
@task
def cover():
'''Run tests suite with coverage'''
run('cd {0} && nosetests --rednose --force-color \
--with-coverage --cover-html --cover-package=flask_restplus'.format(ROOT), pty=True)
@task
def tox():
'''Run tests in all Python versions'''
run('tox', pty=True)
@task
def qa():
'''Run a quality report'''
run('flake8 {0}/flask_restplus'.format(ROOT))
@task
def doc():
'''Build the documentation'''
run('cd {0}/doc && make html'.format(ROOT), pty=True)
@task
def dist():
'''Package for distribution'''
run('cd {0} && python setup.py sdist bdist_wheel'.format(ROOT), pty=True)
@task(tox, doc, qa, dist, default=True)
def all():
pass
|
|
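The tasks in the record above are meant to be driven from the shell through the invoke CLI (for example "invoke test", or "invoke all" for the aggregate task). As a small hedged illustration in Python, the snippet below simply wraps the same calls in subprocess; it assumes the invoke CLI is installed and that it is run from the directory holding tasks.py.

# Hedged usage sketch: drive the tasks defined above through the invoke CLI.
import subprocess

for task_name in ('clean', 'qa', 'test', 'dist'):
    subprocess.check_call(['invoke', task_name])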
0607ed22c5fd12a0a9553f15c7c19a087ecae279
|
tempo.py
|
tempo.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface with a Tempo server"""
import requiem
from requiem import jsclient
# Parameterize the resource names
resource_name = 'periodic_task'
resources_name = '%ss' % resource_name
resource = "/%s/{id}" % resources_name
class TempoClient(jsclient.JSONClient):
"""A client class for accessing the Tempo service."""
@requiem.restmethod('GET', "/%s" % resources_name)
def task_get_all(self, req):
"""Retrieve a list of all existing tasks."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resources_name]
@requiem.restmethod('GET', resource)
def task_get(self, req, id):
"""Retrieve a task by its ID."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
@requiem.restmethod('PUT', resource)
def task_create(self, req, id, task, uuid, recurrence):
"""Create or update an existing task."""
# Build the task object we're going to send
obj = dict(task=task, instance_uuid=uuid, recurrence=recurrence)
# Attach it to the request
self._attach_obj(req, obj)
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
task_update = task_create
@requiem.restmethod('DELETE', resource)
def task_delete(self, req, id):
"""Delete a task."""
# Send the request and ignore the return; Requiem raises an
# exception if we get an error, and success returns a 204
req.send()
|
Build a client for accessing Tempo.
|
Build a client for accessing Tempo.
|
Python
|
apache-2.0
|
rackerlabs/Tempo
|
Build a client for accessing Tempo.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface with a Tempo server"""
import requiem
from requiem import jsclient
# Parameterize the resource names
resource_name = 'periodic_task'
resources_name = '%ss' % resource_name
resource = "/%s/{id}" % resources_name
class TempoClient(jsclient.JSONClient):
"""A client class for accessing the Tempo service."""
@requiem.restmethod('GET', "/%s" % resources_name)
def task_get_all(self, req):
"""Retrieve a list of all existing tasks."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resources_name]
@requiem.restmethod('GET', resource)
def task_get(self, req, id):
"""Retrieve a task by its ID."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
@requiem.restmethod('PUT', resource)
def task_create(self, req, id, task, uuid, recurrence):
"""Create or update an existing task."""
# Build the task object we're going to send
obj = dict(task=task, instance_uuid=uuid, recurrence=recurrence)
# Attach it to the request
self._attach_obj(req, obj)
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
task_update = task_create
@requiem.restmethod('DELETE', resource)
def task_delete(self, req, id):
"""Delete a task."""
# Send the request and ignore the return; Requiem raises an
# exception if we get an error, and success returns a 204
req.send()
|
<commit_before><commit_msg>Build a client for accessing Tempo.<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface with a Tempo server"""
import requiem
from requiem import jsclient
# Parameterize the resource names
resource_name = 'periodic_task'
resources_name = '%ss' % resource_name
resource = "/%s/{id}" % resources_name
class TempoClient(jsclient.JSONClient):
"""A client class for accessing the Tempo service."""
@requiem.restmethod('GET', "/%s" % resources_name)
def task_get_all(self, req):
"""Retrieve a list of all existing tasks."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resources_name]
@requiem.restmethod('GET', resource)
def task_get(self, req, id):
"""Retrieve a task by its ID."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
@requiem.restmethod('PUT', resource)
def task_create(self, req, id, task, uuid, recurrence):
"""Create or update an existing task."""
# Build the task object we're going to send
obj = dict(task=task, instance_uuid=uuid, recurrence=recurrence)
# Attach it to the request
self._attach_obj(req, obj)
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
task_update = task_create
@requiem.restmethod('DELETE', resource)
def task_delete(self, req, id):
"""Delete a task."""
# Send the request and ignore the return; Requiem raises an
# exception if we get an error, and success returns a 204
req.send()
|
Build a client for accessing Tempo.# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface with a Tempo server"""
import requiem
from requiem import jsclient
# Parameterize the resource names
resource_name = 'periodic_task'
resources_name = '%ss' % resource_name
resource = "/%s/{id}" % resources_name
class TempoClient(jsclient.JSONClient):
"""A client class for accessing the Tempo service."""
@requiem.restmethod('GET', "/%s" % resources_name)
def task_get_all(self, req):
"""Retrieve a list of all existing tasks."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resources_name]
@requiem.restmethod('GET', resource)
def task_get(self, req, id):
"""Retrieve a task by its ID."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
@requiem.restmethod('PUT', resource)
def task_create(self, req, id, task, uuid, recurrence):
"""Create or update an existing task."""
# Build the task object we're going to send
obj = dict(task=task, instance_uuid=uuid, recurrence=recurrence)
# Attach it to the request
self._attach_obj(req, obj)
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
task_update = task_create
@requiem.restmethod('DELETE', resource)
def task_delete(self, req, id):
"""Delete a task."""
# Send the request and ignore the return; Requiem raises an
# exception if we get an error, and success returns a 204
req.send()
|
<commit_before><commit_msg>Build a client for accessing Tempo.<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface with a Tempo server"""
import requiem
from requiem import jsclient
# Parameterize the resource names
resource_name = 'periodic_task'
resources_name = '%ss' % resource_name
resource = "/%s/{id}" % resources_name
class TempoClient(jsclient.JSONClient):
"""A client class for accessing the Tempo service."""
@requiem.restmethod('GET', "/%s" % resources_name)
def task_get_all(self, req):
"""Retrieve a list of all existing tasks."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resources_name]
@requiem.restmethod('GET', resource)
def task_get(self, req, id):
"""Retrieve a task by its ID."""
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
@requiem.restmethod('PUT', resource)
def task_create(self, req, id, task, uuid, recurrence):
"""Create or update an existing task."""
# Build the task object we're going to send
obj = dict(task=task, instance_uuid=uuid, recurrence=recurrence)
# Attach it to the request
self._attach_obj(req, obj)
# Send the request
resp = req.send()
# Return the result
return resp.obj[resource_name]
task_update = task_create
@requiem.restmethod('DELETE', resource)
def task_delete(self, req, id):
"""Delete a task."""
# Send the request and ignore the return; Requiem raises an
# exception if we get an error, and success returns a 204
req.send()
|
|
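A hedged usage sketch for the Tempo client above. The constructor of requiem's JSONClient is not shown in that file, so the base-URL argument here is an assumption, as is the idea that the restmethod decorator injects the req parameter so callers pass only the remaining arguments; the method names and payload fields do match the TempoClient definition, and the module path "tempo" is assumed.

# Illustrative only; constructor form and module path are assumptions.
from tempo import TempoClient

client = TempoClient('http://localhost:8080')
created = client.task_create(
    'nightly-snapshot',                              # {id} in the resource URL
    task='snapshot',
    uuid='00000000-0000-0000-0000-000000000000',     # placeholder instance UUID
    recurrence='0 2 * * *')                          # cron-style recurrence
for periodic_task in client.task_get_all():
    print(periodic_task)
client.task_delete('nightly-snapshot')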
d1599309941261492abde2445616c42f48660f28
|
Diurnal_Plot.py
|
Diurnal_Plot.py
|
import sys
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
"""
Diurnal_Plot.py:
Produce diurnal plots of fitbit data (5 min intervals).
Inspired by Stephen Wolfram's "The Personal Analytics of my Life"
(http://blog.stephenwolfram.com/2012/03/the-personal-analytics-of-my-life/)
Call with fitbit data as first argument.
"""
if __name__=='__main__':
filename = sys.argv[1]
minutes_in_day = (24 * 60)
X = []
Y = []
values = []
with open(filename,'r') as f:
day_num = -1
day = None
time_num = 0
for l in f:
parts = l.split(",")
date_s = parts[0]
value = int(parts[1])
datet = dt.datetime.strptime(date_s, "%Y-%m-%d %H:%M")
if day != datet.date():
day_num = day_num + 1
day = datet.date()
if value > 0:
values.append(value)
X.append(datet.date())
Y.append(minutes_in_day - (datet.time().hour * 60 + datet.time().minute))
#Y axis locations
locations = np.arange(0,24)*60
#Y axis labels
labels = ["%d:00" % ((24*60 - x) /60) for x in locations]
ax = plt.scatter(X,Y, c=values, s=3, facecolor='0.5', lw = 0, cmap=mpl.cm.YlGnBu)
plt.ylim([0,minutes_in_day])
plt.yticks(locations,labels)
plt.savefig("FitbitData.png", fmt="png");
|
Add very basic diurnal plotter
|
Add very basic diurnal plotter
|
Python
|
mit
|
mgaudet/FitbitAnalysisTools,mgaudet/FitbitAnalysisTools
|
Add very basic diurnal plotter
|
import sys
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
"""
Diurnal_Plot.py:
Produce diurnal plots of fitbit data (5 min intervals).
Inspired by Stephen Wolfram's "The Personal Analytics of my Life"
(http://blog.stephenwolfram.com/2012/03/the-personal-analytics-of-my-life/)
Call with fitbit data as first argument.
"""
if __name__=='__main__':
filename = sys.argv[1]
minutes_in_day = (24 * 60)
X = []
Y = []
values = []
with open(filename,'r') as f:
day_num = -1
day = None
time_num = 0
for l in f:
parts = l.split(",")
date_s = parts[0]
value = int(parts[1])
datet = dt.datetime.strptime(date_s, "%Y-%m-%d %H:%M")
if day != datet.date():
day_num = day_num + 1
day = datet.date()
if value > 0:
values.append(value)
X.append(datet.date())
Y.append(minutes_in_day - (datet.time().hour * 60 + datet.time().minute))
#Y axis locations
locations = np.arange(0,24)*60
#Y axis labels
labels = ["%d:00" % ((24*60 - x) /60) for x in locations]
ax = plt.scatter(X,Y, c=values, s=3, facecolor='0.5', lw = 0, cmap=mpl.cm.YlGnBu)
plt.ylim([0,minutes_in_day])
plt.yticks(locations,labels)
plt.savefig("FitbitData.png", fmt="png");
|
<commit_before><commit_msg>Add very basic diurnal plotter<commit_after>
|
import sys
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
"""
Diurnal_Plot.py:
Produce diurnal plots of fitbit data (5 min intervals).
Inspired by Stephen Wolfram's "The Personal Analytics of my Life"
(http://blog.stephenwolfram.com/2012/03/the-personal-analytics-of-my-life/)
Call with fitbit data as first argument.
"""
if __name__=='__main__':
filename = sys.argv[1]
minutes_in_day = (24 * 60)
X = []
Y = []
values = []
with open(filename,'r') as f:
day_num = -1
day = None
time_num = 0
for l in f:
parts = l.split(",")
date_s = parts[0]
value = int(parts[1])
datet = dt.datetime.strptime(date_s, "%Y-%m-%d %H:%M")
if day != datet.date():
day_num = day_num + 1
day = datet.date()
if value > 0:
values.append(value)
X.append(datet.date())
Y.append(minutes_in_day - (datet.time().hour * 60 + datet.time().minute))
#Y axis locations
locations = np.arange(0,24)*60
#Y axis labels
labels = ["%d:00" % ((24*60 - x) /60) for x in locations]
ax = plt.scatter(X,Y, c=values, s=3, facecolor='0.5', lw = 0, cmap=mpl.cm.YlGnBu)
plt.ylim([0,minutes_in_day])
plt.yticks(locations,labels)
plt.savefig("FitbitData.png", fmt="png");
|
Add very basic diurnal plotterimport sys
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
"""
Diurnal_Plot.py:
Produce diurnal plots of fitbit data (5 min intervals).
Inspired by Stephen Wolfram's "The Personal Analytics of my Life"
(http://blog.stephenwolfram.com/2012/03/the-personal-analytics-of-my-life/)
Call with fitbit data as first argument.
"""
if __name__=='__main__':
filename = sys.argv[1]
minutes_in_day = (24 * 60)
X = []
Y = []
values = []
with open(filename,'r') as f:
day_num = -1
day = None
time_num = 0
for l in f:
parts = l.split(",")
date_s = parts[0]
value = int(parts[1])
datet = dt.datetime.strptime(date_s, "%Y-%m-%d %H:%M")
if day != datet.date():
day_num = day_num + 1
day = datet.date()
if value > 0:
values.append(value)
X.append(datet.date())
Y.append(minutes_in_day - (datet.time().hour * 60 + datet.time().minute))
#Y axis locations
locations = np.arange(0,24)*60
#Y axis labels
labels = ["%d:00" % ((24*60 - x) /60) for x in locations]
ax = plt.scatter(X,Y, c=values, s=3, facecolor='0.5', lw = 0, cmap=mpl.cm.YlGnBu)
plt.ylim([0,minutes_in_day])
plt.yticks(locations,labels)
plt.savefig("FitbitData.png", fmt="png");
|
<commit_before><commit_msg>Add very basic diurnal plotter<commit_after>import sys
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
"""
Diurnal_Plot.py:
Produce diurnal plots of fitbit data (5 min intervals).
Inspired by Stephen Wolfram's "The Personal Analytics of my Life"
(http://blog.stephenwolfram.com/2012/03/the-personal-analytics-of-my-life/)
Call with fitbit data as first argument.
"""
if __name__=='__main__':
filename = sys.argv[1]
minutes_in_day = (24 * 60)
X = []
Y = []
values = []
with open(filename,'r') as f:
day_num = -1
day = None
time_num = 0
for l in f:
parts = l.split(",")
date_s = parts[0]
value = int(parts[1])
datet = dt.datetime.strptime(date_s, "%Y-%m-%d %H:%M")
if day != datet.date():
day_num = day_num + 1
day = datet.date()
if value > 0:
values.append(value)
X.append(datet.date())
Y.append(minutes_in_day - (datet.time().hour * 60 + datet.time().minute))
#Y axis locations
locations = np.arange(0,24)*60
#Y axis labels
labels = ["%d:00" % ((24*60 - x) /60) for x in locations]
ax = plt.scatter(X,Y, c=values, s=3, facecolor='0.5', lw = 0, cmap=mpl.cm.YlGnBu)
plt.ylim([0,minutes_in_day])
plt.yticks(locations,labels)
plt.savefig("FitbitData.png", fmt="png");
|
|
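The plotter in the record above expects one "YYYY-MM-DD HH:MM,value" line per five-minute interval. The helper below writes a small synthetic file in that shape so the script can be tried without real Fitbit exports; the file name and the fake step counts are arbitrary placeholders.

# Generates placeholder data matching the format parsed by Diurnal_Plot.py.
import datetime as dt
import random

def write_sample(path='sample_fitbit.csv', days=2):
    start = dt.datetime(2014, 1, 1)
    with open(path, 'w') as f:
        for i in range(days * 24 * 12):              # 5-minute intervals
            stamp = start + dt.timedelta(minutes=5 * i)
            steps = random.randint(0, 120) if 8 <= stamp.hour <= 22 else 0
            f.write('%s,%d\n' % (stamp.strftime('%Y-%m-%d %H:%M'), steps))

if __name__ == '__main__':
    write_sample()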
c33254615b4dc30143af1d0cb35b38ca73eae883
|
zvm/zstring.py
|
zvm/zstring.py
|
#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
Add an unfinished version of the ZString stream translator.
|
Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.
|
Python
|
bsd-3-clause
|
sussman/zvm,sussman/zvm
|
Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.
|
#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
<commit_before><commit_msg>Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.<commit_after>
|
#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
<commit_before><commit_msg>Add an unfinished version of the ZString stream translator.
* zvm/zstring.py: New file.<commit_after>#
# A ZString-to-ASCII Universal Translator.
#
# For the license of this file, please consult the LICENSE file in the
# root directory of this distribution.
#
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the beginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
|
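Independent of the unfinished stream class above, the layout it walks is simple: each 16-bit Z-string word carries three 5-bit Z-characters plus a top end-of-string bit. The self-contained sketch below unpacks that layout with plain integer arithmetic using the conventional Z-machine bit positions; the BitField-based class above may index bits differently, so this is a companion illustration rather than a drop-in replacement.

# Self-contained sketch of Z-string word unpacking (no zvm imports needed).
def unpack_zstring_word(word):
    """Return (is_last, [c1, c2, c3]) for one 16-bit Z-string word."""
    is_last = bool(word & 0x8000)                    # bit 15: end-of-string marker
    chars = [(word >> shift) & 0x1F for shift in (10, 5, 0)]
    return is_last, chars

is_last, chars = unpack_zstring_word(0x94A5)
print(is_last, chars)                                # True [5, 5, 5]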
68a0932e6b753ef475148d258fb4e2b9af607d55
|
bin/RandomWallpaper.py
|
bin/RandomWallpaper.py
|
# Copyright 2021 Bruce Dawson. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script randomly selects a photo from a list of files.
The list of files has a heading line and subsequent lines have tab-separated
columns where the first column is the file name and the third column is a rating
that goes from 0 to 5 stars. This script only selects from photos that are
five-star rated.
A list of selected photos is retained, which could be used to avoid repetition,
but given a sufficiently large number of photos this shouldn't be a problem.
This can be set as a scheduled task. In this context it is important to run it
using pythonw.exe so that no console window pops up.
"""
import ctypes
import os
import random
import sys
def main():
# This is not, apparently, a completely robust way to get the user's documents
# directory, but the alternatives are much messier and not worth it to me.
documents = os.path.expanduser(r'~\Documents')
database = os.path.join(documents, 'PhotoDatabase.txt')
lines = open(database, 'rb').read().decode('utf-16').splitlines()
# Filter to just the 5-star photos. This also removes the heading line.
filtered = [line for line in lines if line.split('\t')[2] == '5']
while True:
# Select a random line.
line = random.choice(filtered)
path = line.split('\t')[0]
# Try again if the photo doesn't exist for some reason.
if not os.path.exists(path):
continue
# Try again if the file is a video.
extension = os.path.splitext(path)[1].lower()
if extension in ['.mp4', '.mov', '.wmv']:
continue
# Magic incantation to set the wallpaper.
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 2)
break
# Record information about the displayed photo.
history = os.path.join(documents, 'PhotoWallpaperHistory.txt')
with open(history, 'a') as f:
f.write('%s\n' % line)
if __name__ == '__main__':
sys.exit(main())
|
Add script to display random wallpaper
|
Add script to display random wallpaper
Windows 10 took away the handy "random good wallpaper" and apparently it
took me five years to get around to recreating it. This script makes use
of my existing database of photos, with star ratings listed. It filters
that database (actually just a text file) down to just the five-star
photos, skips the videos, and then selects one of those great photos as
the desktop wallpaper. It also records which photo was selected because
it's often nice to know where a photo is from.
Because this is dependent on the photo database it is not much use to
anyone else, but maybe some day I'll share that code as well.
|
Python
|
apache-2.0
|
randomascii/tools,randomascii/tools,randomascii/tools
|
Add script to display random wallpaper
Windows 10 took away the handy "random good wallpaper" and apparently it
took me five years to get around to recreating it. This script makes use
of my existing database of photos, with star ratings listed. It filters
that database (actually just a text file) down to just the five-star
photos, skips the videos, and then selects one of those great photos as
the desktop wallpaper. It also records which photo was selected because
it's often nice to know where a photo is from.
Because this is dependent on the photo database it is not much use to
anyone else, but maybe some day I'll share that code as well.
|
# Copyright 2021 Bruce Dawson. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script randomly selects a photo from a list of files.
The list of files has a heading line and subsequent lines have tab-separated
columns where the first column is the file name and the third column is a rating
that goes from 0 to 5 stars. This script only selects from photos that are
five-star rated.
A list of selected photos is retained, which could be used to avoid repetition,
but given a sufficiently large number of photos this shouldn't be a problem.
This can be set as a scheduled task. In this context it is important to run it
using pythonw.exe so that no console window pops up.
"""
import ctypes
import os
import random
import sys
def main():
# This is not, apparently, a completely robust way to get the user's documents
# directory, but the alternatives are much messier and not worth it to me.
documents = os.path.expanduser(r'~\Documents')
database = os.path.join(documents, 'PhotoDatabase.txt')
lines = open(database, 'rb').read().decode('utf-16').splitlines()
# Filter to just the 5-star photos. This also removes the heading line.
filtered = [line for line in lines if line.split('\t')[2] == '5']
while True:
# Select a random line.
line = random.choice(filtered)
path = line.split('\t')[0]
# Try again if the photo doesn't exist for some reason.
if not os.path.exists(path):
continue
# Try again if the file is a video.
extension = os.path.splitext(path)[1].lower()
if extension in ['.mp4', '.mov', '.wmv']:
continue
# Magic incantation to set the wallpaper.
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 2)
break
# Record information about the displayed photo.
history = os.path.join(documents, 'PhotoWallpaperHistory.txt')
with open(history, 'a') as f:
f.write('%s\n' % line)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to display random wallpaper
Windows 10 took away the handy "random good wallpaper" and apparently it
took me five years to get around to recreating it. This script makes use
of my existing database of photos, with star ratings listed. It filters
that database (actually just a text file) down to just the five-star
photos, skips the videos, and then selects one of those great photos as
the desktop wallpaper. It also records which photo was selected because
it's often nice to know where a photo is from.
Because this is dependent on the photo database it is not much use to
anyone else, but maybe some day I'll share that code as well.<commit_after>
|
# Copyright 2021 Bruce Dawson. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script randomly selects a photo from a list of files.
The list of files has a heading line and subsequent lines have tab-separated
columns where the first column is the file name and the third column is a rating
that goes from 0 to 5 stars. This script only selects from photos that are
five-star rated.
A list of selected photos is retained, which could be used to avoid repetition,
but given a sufficiently large number of photos this shouldn't be a problem.
This can be set as a scheduled task. In this context it is important to run it
using pythonw.exe so that no console window pops up.
"""
import ctypes
import os
import random
import sys
def main():
# This is not, apparently, a completely robust way to get the user's documents
# directory, but the alternatives are much messier and not worth it to me.
documents = os.path.expanduser(r'~\Documents')
database = os.path.join(documents, 'PhotoDatabase.txt')
lines = open(database, 'rb').read().decode('utf-16').splitlines()
# Filter to just the 5-star photos. This also removes the heading line.
filtered = [line for line in lines if line.split('\t')[2] == '5']
while True:
# Select a random line.
line = random.choice(filtered)
path = line.split('\t')[0]
# Try again if the photo doesn't exist for some reason.
if not os.path.exists(path):
continue
# Try again if the file is a video.
extension = os.path.splitext(path)[1].lower()
if extension in ['.mp4', '.mov', '.wmv']:
continue
# Magic incantation to set the wallpaper.
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 2)
break
# Record information about the displayed photo.
history = os.path.join(documents, 'PhotoWallpaperHistory.txt')
with open(history, 'a') as f:
f.write('%s\n' % line)
if __name__ == '__main__':
sys.exit(main())
|
Add script to display random wallpaper
Windows 10 took away the handy "random good wallpaper" and apparently it
took me five years to get around to recreating it. This script makes use
of my existing database of photos, with star ratings listed. It filters
that database (actually just a text file) down to just the five-star
photos, skips the videos, and then selects one of those great photos as
the desktop wallpaper. It also records which photo was selected because
it's often nice to know where a photo is from.
Because this is dependent on the photo database it is not much use to
anyone else, but maybe some day I'll share that code as well.# Copyright 2021 Bruce Dawson. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script randomly selects a photo from a list of files.
The list of files has a heading line and subsequent lines have tab-separated
columns where the first column is the file name and the third column is a rating
that goes from 0 to 5 stars. This script only selects from photos that are
five-star rated.
A list of selected photos is retained, which could be used to avoid repetition,
but given a sufficiently large number of photos this shouldn't be a problem.
This can be set as a scheduled task. In this context it is important to run it
using pythonw.exe so that no console window pops up.
"""
import ctypes
import os
import random
import sys
def main():
# This is not, apparently, a completely robust way to get the user's documents
# directory, but the alternatives are much messier and not worth it to me.
documents = os.path.expanduser(r'~\Documents')
database = os.path.join(documents, 'PhotoDatabase.txt')
lines = open(database, 'rb').read().decode('utf-16').splitlines()
# Filter to just the 5-star photos. This also removes the heading line.
filtered = [line for line in lines if line.split('\t')[2] == '5']
while True:
# Select a random line.
line = random.choice(filtered)
path = line.split('\t')[0]
# Try again if the photo doesn't exist for some reason.
if not os.path.exists(path):
continue
# Try again if the file is a video.
extension = os.path.splitext(path)[1].lower()
if extension in ['.mp4', '.mov', '.wmv']:
continue
# Magic incantation to set the wallpaper.
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 2)
break
# Record information about the displayed photo.
history = os.path.join(documents, 'PhotoWallpaperHistory.txt')
with open(history, 'a') as f:
f.write('%s\n' % line)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to display random wallpaper
Windows 10 took away the handy "random good wallpaper" and apparently it
took me five years to get around to recreating it. This script makes use
of my existing database of photos, with star ratings listed. It filters
that database (actually just a text file) down to just the five-star
photos, skips the videos, and then selects one of those great photos as
the desktop wallpaper. It also records which photo was selected because
it's often nice to know where a photo is from.
Because this is dependent on the photo database it is not much use to
anyone else, but maybe some day I'll share that code as well.<commit_after># Copyright 2021 Bruce Dawson. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script randomly selects a photo from a list of files.
The list of files has a heading line and subsequent lines have tab-separated
columns where the first column is the file name and the third column is a rating
that goes from 0 to 5 stars. This script only selects from photos that are
five-star rated.
A list of selected photos is retained, which could be used to avoid repetition,
but given a sufficiently large number of photos this shouldn't be a problem.
This can be set as a scheduled task. In this context it is important to run it
using pythonw.exe so that no console window pops up.
"""
import ctypes
import os
import random
import sys
def main():
# This is not, apparently, a completely robust way to get the user's documents
# directory, but the alternatives are much messier and not worth it to me.
documents = os.path.expanduser(r'~\Documents')
database = os.path.join(documents, 'PhotoDatabase.txt')
lines = open(database, 'rb').read().decode('utf-16').splitlines()
# Filter to just the 5-star photos. This also removes the heading line.
filtered = [line for line in lines if line.split('\t')[2] == '5']
while True:
# Select a random line.
line = random.choice(filtered)
path = line.split('\t')[0]
# Try again if the photo doesn't exist for some reason.
if not os.path.exists(path):
continue
# Try again if the file is a video.
extension = os.path.splitext(path)[1].lower()
if extension in ['.mp4', '.mov', '.wmv']:
continue
# Magic incantation to set the wallpaper.
SPI_SETDESKWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 2)
break
# Record information about the displayed photo.
history = os.path.join(documents, 'PhotoWallpaperHistory.txt')
with open(history, 'a') as f:
f.write('%s\n' % line)
if __name__ == '__main__':
sys.exit(main())
|
|
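For reference, the wallpaper script above consumes a UTF-16, tab-separated file with a heading row, the file path in the first column and a 0-5 star rating in the third. The snippet below writes a tiny example in that shape; the middle column's meaning is not visible in the script, so it is only a placeholder here, and the paths are invented.

# Writes a minimal placeholder PhotoDatabase.txt compatible with the script above.
import io

rows = [
    ('Path', 'Caption', 'Rating'),                       # heading row (filtered out)
    (r'C:\Photos\2020\sunset.jpg', 'placeholder', '5'),
    (r'C:\Photos\2021\clip.mp4',   'placeholder', '5'),  # video: skipped later
    (r'C:\Photos\2019\hike.jpg',   'placeholder', '3'),  # under five stars: filtered
]
with io.open('PhotoDatabase.txt', 'w', encoding='utf-16') as f:
    for row in rows:
        f.write('\t'.join(row) + '\n')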
31bd7cac4506594a79958664026ee841c69ae4b7
|
gmn/src/gmn/app/migrations/0005_auto_20170527_1554.py
|
gmn/src/gmn/app/migrations/0005_auto_20170527_1554.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 15:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20170523_0137'),
]
operations = [
migrations.AlterField(
model_name='localreplica',
name='pid',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='localreplica_pid', to='app.IdNamespace'),
),
]
|
Add Django db migration to latest version
|
Add Django db migration to latest version
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add Django db migration to latest version
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 15:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20170523_0137'),
]
operations = [
migrations.AlterField(
model_name='localreplica',
name='pid',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='localreplica_pid', to='app.IdNamespace'),
),
]
|
<commit_before><commit_msg>Add Django db migration to latest version<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 15:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20170523_0137'),
]
operations = [
migrations.AlterField(
model_name='localreplica',
name='pid',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='localreplica_pid', to='app.IdNamespace'),
),
]
|
Add Django db migration to latest version# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 15:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20170523_0137'),
]
operations = [
migrations.AlterField(
model_name='localreplica',
name='pid',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='localreplica_pid', to='app.IdNamespace'),
),
]
|
<commit_before><commit_msg>Add Django db migration to latest version<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 15:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20170523_0137'),
]
operations = [
migrations.AlterField(
model_name='localreplica',
name='pid',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='localreplica_pid', to='app.IdNamespace'),
),
]
|
|
2b91bf8037fd12f31eb639b0847217b07f413a66
|
bin/refresh_varsnap.py
|
bin/refresh_varsnap.py
|
"""
This script refreshes production varsnap snaps
"""
import os
import subprocess
import sys
import requests
import syspath
from syspath import git_root
from app import serve
os.environ['ENV'] = 'production'
app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')
|
Add script to refresh python production snaps
|
Add script to refresh python production snaps
|
Python
|
mit
|
albertyw/base-flask,albertyw/base-flask,albertyw/base-flask,albertyw/base-flask
|
Add script to refresh python production snaps
|
"""
This script refreshes production varsnap snaps
"""
import os
import subprocess
import sys
import requests
import syspath
from syspath import git_root
from app import serve
os.environ['ENV'] = 'production'
app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')
|
<commit_before><commit_msg>Add script to refresh python production snaps<commit_after>
|
"""
This script refreshes production varsnap snaps
"""
import os
import subprocess
import sys
import requests
import syspath
from syspath import git_root
from app import serve
os.environ['ENV'] = 'production'
app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')
|
Add script to refresh python production snaps"""
This script refreshes production varsnap snaps
"""
import os
import subprocess
import sys
import requests
import syspath
from syspath import git_root
from app import serve
os.environ['ENV'] = 'production'
app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')
|
<commit_before><commit_msg>Add script to refresh python production snaps<commit_after>"""
This script refreshes production varsnap snaps
"""
import os
import subprocess
import sys
import requests
import syspath
from syspath import git_root
from app import serve
os.environ['ENV'] = 'production'
app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')
|
|
a311df9c2d591ca3db33b42ce50de37968f4f5b8
|
letsencrypt/client/tests/display/enhancements_test.py
|
letsencrypt/client/tests/display/enhancements_test.py
|
"""Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client.display import display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.client.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(
errors.LetsEncryptClientError, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.client.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main()
|
Add tests for enhancements UI
|
Add tests for enhancements UI
|
Python
|
apache-2.0
|
BillKeenan/lets-encrypt-preview,lbeltrame/letsencrypt,rugk/letsencrypt,Sveder/letsencrypt,vcavallo/letsencrypt,mrb/letsencrypt,stweil/letsencrypt,VladimirTyrin/letsencrypt,TheBoegl/letsencrypt,g1franc/lets-encrypt-preview,letsencrypt/letsencrypt,bestwpw/letsencrypt,jsha/letsencrypt,jmaurice/letsencrypt,twstrike/le_for_patching,ruo91/letsencrypt,armersong/letsencrypt,TheBoegl/letsencrypt,bsmr-misc-forks/letsencrypt,digideskio/lets-encrypt-preview,Hasimir/letsencrypt,beermix/letsencrypt,xgin/letsencrypt,rlustin/letsencrypt,dietsche/letsencrypt,brentdax/letsencrypt,twstrike/le_for_patching,rutsky/letsencrypt,beermix/letsencrypt,sapics/letsencrypt,rlustin/letsencrypt,ahojjati/letsencrypt,fmarier/letsencrypt,goofwear/letsencrypt,Hasimir/letsencrypt,BKreisel/letsencrypt,riseofthetigers/letsencrypt,armersong/letsencrypt,ahojjati/letsencrypt,rutsky/letsencrypt,skynet/letsencrypt,PeterMosmans/letsencrypt,kuba/letsencrypt,brentdax/letsencrypt,digideskio/lets-encrypt-preview,sjerdo/letsencrypt,mrb/letsencrypt,stewnorriss/letsencrypt,jmaurice/letsencrypt,BKreisel/letsencrypt,Jonadabe/letsencrypt,jtl999/certbot,bsmr-misc-forks/letsencrypt,Bachmann1234/letsencrypt,jsha/letsencrypt,sjerdo/letsencrypt,ruo91/letsencrypt,modulexcite/letsencrypt,kevinlondon/letsencrypt,lmcro/letsencrypt,skynet/letsencrypt,Sveder/letsencrypt,thanatos/lets-encrypt-preview,tyagi-prashant/letsencrypt,bestwpw/letsencrypt,DavidGarciaCat/letsencrypt,modulexcite/letsencrypt,letsencrypt/letsencrypt,tyagi-prashant/letsencrypt,stewnorriss/letsencrypt,hsduk/lets-encrypt-preview,BillKeenan/lets-encrypt-preview,Jonadabe/letsencrypt,martindale/letsencrypt,solidgoldbomb/letsencrypt,tdfischer/lets-encrypt-preview,deserted/letsencrypt,g1franc/lets-encrypt-preview,martindale/letsencrypt,diracdeltas/lets-encrypt-preview,Jadaw1n/letsencrypt,deserted/letsencrypt,vcavallo/letsencrypt,xgin/letsencrypt,kevinlondon/letsencrypt,riseofthetigers/letsencrypt,rugk/letsencrypt,Bachmann1234/letsencrypt,luorenjin/letsencrypt,hlieberman/letsencrypt,mitnk/letsencrypt,VladimirTyrin/letsencrypt,kuba/letsencrypt,wteiken/letsencrypt,Jadaw1n/letsencrypt,wteiken/letsencrypt,lmcro/letsencrypt,jmhodges/letsencrypt,hsduk/lets-encrypt-preview,DavidGarciaCat/letsencrypt,ghyde/letsencrypt,diracdeltas/lets-encrypt-preview,jtl999/certbot,fmarier/letsencrypt,ghyde/letsencrypt,sapics/letsencrypt,stweil/letsencrypt,piru/letsencrypt,tdfischer/lets-encrypt-preview,mitnk/letsencrypt,dietsche/letsencrypt,PeterMosmans/letsencrypt,goofwear/letsencrypt,luorenjin/letsencrypt,piru/letsencrypt,lbeltrame/letsencrypt,solidgoldbomb/letsencrypt,jmhodges/letsencrypt,thanatos/lets-encrypt-preview,hlieberman/letsencrypt
|
Add tests for enhancements UI
|
"""Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client.display import display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.client.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(
errors.LetsEncryptClientError, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.client.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests for enhancements UI<commit_after>
|
"""Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client.display import display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.client.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(
errors.LetsEncryptClientError, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.client.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main()
|
Add tests for enhancements UI"""Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client.display import display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.client.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(
errors.LetsEncryptClientError, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.client.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests for enhancements UI<commit_after>"""Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client.display import display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.client.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(
errors.LetsEncryptClientError, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.client.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.client.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main()
|
|
21d46e91fafe1081f49eabfb93f9caf6c39a31ba
|
laboratorio/rsa/__init__.py
|
laboratorio/rsa/__init__.py
|
# -*- coding: utf-8 -*-
from rsa import RSAKeyGeneration
import sys
def main():
print('mensagem:')
mensagem = input()
if len(sys.argv) > 2:
tamanho_chave, acuracidade = eval(sys.argv[1]), eval(sys.argv[2])
else:
tamanho_chave, acuracidade = 64, 15
rsa = RSAKeyGeneration()
if isinstance(mensagem, int):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt(mensagem, exponent, public_key)
print('\nMensagem-valor encriptada:', ciphertext)
m = rsa.decrypt(ciphertext, private_key, public_key)
print('\nMensagem-valor decriptada:', m)
if isinstance(mensagem, str):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt_bytes(mensagem, exponent, public_key)
print('\nMensagem-texto encriptada:')
print('==rsa==')
for x in ciphertext:
print(x)
print('==' + str(tamanho_chave) + 'bits==')
m = rsa.decrypt_byte(ciphertext, private_key, public_key)
print('\nMensagem-texto decriptada:', m)
rest = ''
for el in m:
rest += chr(el)
print('\nMensagem-texto decriptada restaurada:', rest)
if __name__ == "__main__":
main()
|
Add init without primality test
|
Add init without primality test
|
Python
|
mit
|
tonussi/inseguro,tonussi/inseguro,tonussi/inseguro
|
Add init without primality test
|
# -*- coding: utf-8 -*-
from rsa import RSAKeyGeneration
import sys
def main():
print('mensagem:')
mensagem = input()
if len(sys.argv) > 2:
tamanho_chave, acuracidade = eval(sys.argv[1]), eval(sys.argv[2])
else:
tamanho_chave, acuracidade = 64, 15
rsa = RSAKeyGeneration()
if isinstance(mensagem, int):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt(mensagem, exponent, public_key)
print('\nMensagem-valor encriptada:', ciphertext)
m = rsa.decrypt(ciphertext, private_key, public_key)
print('\nMensagem-valor decriptada:', m)
if isinstance(mensagem, str):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt_bytes(mensagem, exponent, public_key)
print('\nMensagem-texto encriptada:')
print('==rsa==')
for x in ciphertext:
print(x)
print('==' + str(tamanho_chave) + 'bits==')
m = rsa.decrypt_byte(ciphertext, private_key, public_key)
print('\nMensagem-texto decriptada:', m)
rest = ''
for el in m:
rest += chr(el)
print('\nMensagem-texto decriptada restaurada:', rest)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add init without primality test<commit_after>
|
# -*- coding: utf-8 -*-
from rsa import RSAKeyGeneration
import sys
def main():
print('mensagem:')
mensagem = input()
if len(sys.argv) > 2:
tamanho_chave, acuracidade = eval(sys.argv[1]), eval(sys.argv[2])
else:
tamanho_chave, acuracidade = 64, 15
rsa = RSAKeyGeneration()
if isinstance(mensagem, int):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt(mensagem, exponent, public_key)
print('\nMensagem-valor encriptada:', ciphertext)
m = rsa.decrypt(ciphertext, private_key, public_key)
print('\nMensagem-valor decriptada:', m)
if isinstance(mensagem, str):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt_bytes(mensagem, exponent, public_key)
print('\nMensagem-texto encriptada:')
print('==rsa==')
for x in ciphertext:
print(x)
print('==' + str(tamanho_chave) + 'bits==')
m = rsa.decrypt_byte(ciphertext, private_key, public_key)
print('\nMensagem-texto decriptada:', m)
rest = ''
for el in m:
rest += chr(el)
print('\nMensagem-texto decriptada restaurada:', rest)
if __name__ == "__main__":
main()
|
Add init without primality test# -*- coding: utf-8 -*-
from rsa import RSAKeyGeneration
import sys
def main():
print('mensagem:')
mensagem = input()
if len(sys.argv) > 2:
tamanho_chave, acuracidade = eval(sys.argv[1]), eval(sys.argv[2])
else:
tamanho_chave, acuracidade = 64, 15
rsa = RSAKeyGeneration()
if isinstance(mensagem, int):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt(mensagem, exponent, public_key)
print('\nMensagem-valor encriptada:', ciphertext)
m = rsa.decrypt(ciphertext, private_key, public_key)
print('\nMensagem-valor decriptada:', m)
if isinstance(mensagem, str):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt_bytes(mensagem, exponent, public_key)
print('\nMensagem-texto encriptada:')
print('==rsa==')
for x in ciphertext:
print(x)
print('==' + str(tamanho_chave) + 'bits==')
m = rsa.decrypt_byte(ciphertext, private_key, public_key)
print('\nMensagem-texto decriptada:', m)
rest = ''
for el in m:
rest += chr(el)
print('\nMensagem-texto decriptada restaurada:', rest)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add init without primality test<commit_after># -*- coding: utf-8 -*-
from rsa import RSAKeyGeneration
import sys
def main():
print('mensagem:')
mensagem = input()
if len(sys.argv) > 2:
tamanho_chave, acuracidade = eval(sys.argv[1]), eval(sys.argv[2])
else:
tamanho_chave, acuracidade = 64, 15
rsa = RSAKeyGeneration()
if isinstance(mensagem, int):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt(mensagem, exponent, public_key)
print('\nMensagem-valor encriptada:', ciphertext)
m = rsa.decrypt(ciphertext, private_key, public_key)
print('\nMensagem-valor decriptada:', m)
if isinstance(mensagem, str):
public_key, exponent, private_key = rsa.generate(tamanho_chave, acuracidade)
ciphertext = rsa.encrypt_bytes(mensagem, exponent, public_key)
print('\nMensagem-texto encriptada:')
print('==rsa==')
for x in ciphertext:
print(x)
print('==' + str(tamanho_chave) + 'bits==')
m = rsa.decrypt_byte(ciphertext, private_key, public_key)
print('\nMensagem-texto decriptada:', m)
rest = ''
for el in m:
rest += chr(el)
print('\nMensagem-texto decriptada restaurada:', rest)
if __name__ == "__main__":
main()
|
|
155fa00e27fdc12c6c11925308a7790bedfca254
|
Problems/alternatingSums.py
|
Problems/alternatingSums.py
|
def alternatingSums(a):
# teamOne = [n for i, n in enumerate(a) if i % 2 == 0]
# teamTwo = [n for i, n in enumerate(a) if i % 2 == 1]
# return (sum(teamOne), sum(teamTwo))
return (sum(a[::2]), sum(a[1::2]))
|
Solve Code Fights alternating sums problem
|
Solve Code Fights alternating sums problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights alternating sums problem
|
def alternatingSums(a):
# teamOne = [n for i, n in enumerate(a) if i % 2 == 0]
# teamTwo = [n for i, n in enumerate(a) if i % 2 == 1]
# return (sum(teamOne), sum(teamTwo))
return (sum(a[::2]), sum(a[1::2]))
|
<commit_before><commit_msg>Solve Code Fights alternating sums problem<commit_after>
|
def alternatingSums(a):
# teamOne = [n for i, n in enumerate(a) if i % 2 == 0]
# teamTwo = [n for i, n in enumerate(a) if i % 2 == 1]
# return (sum(teamOne), sum(teamTwo))
return (sum(a[::2]), sum(a[1::2]))
|
Solve Code Fights alternating sums problemdef alternatingSums(a):
# teamOne = [n for i, n in enumerate(a) if i % 2 == 0]
# teamTwo = [n for i, n in enumerate(a) if i % 2 == 1]
# return (sum(teamOne), sum(teamTwo))
return (sum(a[::2]), sum(a[1::2]))
|
<commit_before><commit_msg>Solve Code Fights alternating sums problem<commit_after>def alternatingSums(a):
# teamOne = [n for i, n in enumerate(a) if i % 2 == 0]
# teamTwo = [n for i, n in enumerate(a) if i % 2 == 1]
# return (sum(teamOne), sum(teamTwo))
return (sum(a[::2]), sum(a[1::2]))
|
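A quick, self-contained sanity check of the slicing trick above (input values invented for illustration):
def alternatingSums(a):
    # even indices go to team one, odd indices to team two
    return (sum(a[::2]), sum(a[1::2]))
print(alternatingSums([50, 60, 60, 45, 70]))  # (180, 105): 50+60+70 vs 60+45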
|
f8bf5d7ee7617b1600130c2b832b3704d7c75c5e
|
dens_slope.py
|
dens_slope.py
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENS_SLOPE
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def dens_slope(r, rs, alpha, beta, gamma):
"""
The slope of the density profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
slope = -gamma + (gamma-beta)*r**alpha/(rs**alpha + r**alpha)
return slope
|
Add routine to calculate density slope.
|
Add routine to calculate density slope.
|
Python
|
bsd-2-clause
|
lauralwatkins/genhernquist
|
Add routine to calculate density slope.
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENS_SLOPE
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def dens_slope(r, rs, alpha, beta, gamma):
"""
The slope of the density profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
slope = -gamma + (gamma-beta)*r**alpha/(rs**alpha + r**alpha)
return slope
|
<commit_before><commit_msg>Add routine to calculate density slope.<commit_after>
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENS_SLOPE
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def dens_slope(r, rs, alpha, beta, gamma):
"""
The slope of the density profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
slope = -gamma + (gamma-beta)*r**alpha/(rs**alpha + r**alpha)
return slope
|
Add routine to calculate density slope.#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENS_SLOPE
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def dens_slope(r, rs, alpha, beta, gamma):
"""
The slope of the density profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
slope = -gamma + (gamma-beta)*r**alpha/(rs**alpha + r**alpha)
return slope
|
<commit_before><commit_msg>Add routine to calculate density slope.<commit_after>#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENS_SLOPE
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def dens_slope(r, rs, alpha, beta, gamma):
"""
The slope of the density profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
slope = -gamma + (gamma-beta)*r**alpha/(rs**alpha + r**alpha)
return slope
|
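For orientation, a small numeric sketch of the profile above, restated with plain floats (the original expects unit-bearing radii, e.g. astropy quantities; dropping the units here is a simplifying assumption). With the classic Hernquist values alpha=1, beta=4, gamma=1, the logarithmic slope runs from -gamma at the centre to -beta at large radii:
def dens_slope(r, rs, alpha, beta, gamma):
    # -gamma + (gamma - beta) * r**alpha / (rs**alpha + r**alpha)
    return -gamma + (gamma - beta) * r**alpha / (rs**alpha + r**alpha)
rs, alpha, beta, gamma = 10.0, 1.0, 4.0, 1.0
print(dens_slope(1e-3, rs, alpha, beta, gamma))  # ~ -1.0, inner slope -> -gamma
print(dens_slope(rs, rs, alpha, beta, gamma))    # -2.5, halfway between
print(dens_slope(1e6, rs, alpha, beta, gamma))   # ~ -4.0, outer slope -> -beta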
|
4481fb40a34ebb9ec7ab3c857697fa1049a6e600
|
resolwe/flow/migrations/0042_delete_obsolete_perms.py
|
resolwe/flow/migrations/0042_delete_obsolete_perms.py
|
# Generated by Django 2.2.6 on 2019-11-11 14:03
from django.db import migrations
def delete_obsolete_perms(apps, schema_editor):
"""Delete obsolete permissions from the database."""
Permission = apps.get_model('auth', 'Permission')
Permission.objects.filter(
codename__in=[
'add_collection',
'add_entity',
'download_collection',
'download_entity',
'download_data'
]
).delete()
class Migration(migrations.Migration):
dependencies = [
('flow', '0041_remove_download_perm'),
]
operations = [
migrations.RunPython(delete_obsolete_perms)
]
|
Delete obsolete permissions from the database
|
Delete obsolete permissions from the database
|
Python
|
apache-2.0
|
genialis/resolwe,genialis/resolwe
|
Delete obsolete permissions from the database
|
# Generated by Django 2.2.6 on 2019-11-11 14:03
from django.db import migrations
def delete_obsolete_perms(apps, schema_editor):
"""Delete obsolete permissions from the database."""
Permission = apps.get_model('auth', 'Permission')
Permission.objects.filter(
codename__in=[
'add_collection',
'add_entity',
'download_collection',
'download_entity',
'download_data'
]
).delete()
class Migration(migrations.Migration):
dependencies = [
('flow', '0041_remove_download_perm'),
]
operations = [
migrations.RunPython(delete_obsolete_perms)
]
|
<commit_before><commit_msg>Delete obsolete permissions from the database<commit_after>
|
# Generated by Django 2.2.6 on 2019-11-11 14:03
from django.db import migrations
def delete_obsolete_perms(apps, schema_editor):
"""Delete obsolete permissions from the database."""
Permission = apps.get_model('auth', 'Permission')
Permission.objects.filter(
codename__in=[
'add_collection',
'add_entity',
'download_collection',
'download_entity',
'download_data'
]
).delete()
class Migration(migrations.Migration):
dependencies = [
('flow', '0041_remove_download_perm'),
]
operations = [
migrations.RunPython(delete_obsolete_perms)
]
|
Delete obsolete permissions from the database# Generated by Django 2.2.6 on 2019-11-11 14:03
from django.db import migrations
def delete_obsolete_perms(apps, schema_editor):
"""Delete obsolete permissions from the database."""
Permission = apps.get_model('auth', 'Permission')
Permission.objects.filter(
codename__in=[
'add_collection',
'add_entity',
'download_collection',
'download_entity',
'download_data'
]
).delete()
class Migration(migrations.Migration):
dependencies = [
('flow', '0041_remove_download_perm'),
]
operations = [
migrations.RunPython(delete_obsolete_perms)
]
|
<commit_before><commit_msg>Delete obsolete permissions from the database<commit_after># Generated by Django 2.2.6 on 2019-11-11 14:03
from django.db import migrations
def delete_obsolete_perms(apps, schema_editor):
"""Delete obsolete permissions from the database."""
Permission = apps.get_model('auth', 'Permission')
Permission.objects.filter(
codename__in=[
'add_collection',
'add_entity',
'download_collection',
'download_entity',
'download_data'
]
).delete()
class Migration(migrations.Migration):
dependencies = [
('flow', '0041_remove_download_perm'),
]
operations = [
migrations.RunPython(delete_obsolete_perms)
]
|
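A side note on the pattern above: RunPython with only a forward callable leaves the migration irreversible. If rollback matters, a minimal variant (illustrative only, not part of the original commit) passes an explicit no-op reverse:
operations = [
    # reverse_code=RunPython.noop lets `migrate` roll back past this step
    # without raising IrreversibleError; the deleted rows are not restored.
    migrations.RunPython(delete_obsolete_perms, migrations.RunPython.noop)
]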
|
e9a7f28e11e33d5d8679c65697b5588440a4eddd
|
tests/util/test_platform.py
|
tests/util/test_platform.py
|
import platform
from keyring.util.platform_ import (
config_root,
data_root,
_config_root_Linux,
_config_root_Windows,
_data_root_Linux,
_data_root_Windows,
)
def test_platform_Linux():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Linux":
return
assert config_root == _config_root_Linux
assert data_root == _data_root_Linux
def test_platform_Windows():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Windows":
return
assert config_root == _config_root_Windows
assert data_root == _data_root_Windows
|
Add failing test for keyring.util.platform_.config_root
|
Add failing test for keyring.util.platform_.config_root
|
Python
|
mit
|
jaraco/keyring
|
Add failing test for keyring.util.platform_.config_root
|
import platform
from keyring.util.platform_ import (
config_root,
data_root,
_config_root_Linux,
_config_root_Windows,
_data_root_Linux,
_data_root_Windows,
)
def test_platform_Linux():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Linux":
return
assert config_root == _config_root_Linux
assert data_root == _data_root_Linux
def test_platform_Windows():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Windows":
return
assert config_root == _config_root_Windows
assert data_root == _data_root_Windows
|
<commit_before><commit_msg>Add failing test for keyring.util.platform_.config_root<commit_after>
|
import platform
from keyring.util.platform_ import (
config_root,
data_root,
_config_root_Linux,
_config_root_Windows,
_data_root_Linux,
_data_root_Windows,
)
def test_platform_Linux():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Linux":
return
assert config_root == _config_root_Linux
assert data_root == _data_root_Linux
def test_platform_Windows():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Windows":
return
assert config_root == _config_root_Windows
assert data_root == _data_root_Windows
|
Add failing test for keyring.util.platform_.config_rootimport platform
from keyring.util.platform_ import (
config_root,
data_root,
_config_root_Linux,
_config_root_Windows,
_data_root_Linux,
_data_root_Windows,
)
def test_platform_Linux():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Linux":
return
assert config_root == _config_root_Linux
assert data_root == _data_root_Linux
def test_platform_Windows():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Windows":
return
assert config_root == _config_root_Windows
assert data_root == _data_root_Windows
|
<commit_before><commit_msg>Add failing test for keyring.util.platform_.config_root<commit_after>import platform
from keyring.util.platform_ import (
config_root,
data_root,
_config_root_Linux,
_config_root_Windows,
_data_root_Linux,
_data_root_Windows,
)
def test_platform_Linux():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Linux":
return
assert config_root == _config_root_Linux
assert data_root == _data_root_Linux
def test_platform_Windows():
# rely on the Github Actions workflow to run this on different platforms
if platform.system() != "Windows":
return
assert config_root == _config_root_Windows
assert data_root == _data_root_Windows
|
|
c73efe65afd65b6f1b706fba144453e1e40698ba
|
tests/cli/test_repair.py
|
tests/cli/test_repair.py
|
from vdirsyncer.cli.utils import repair_storage
from vdirsyncer.storage.memory import MemoryStorage
from vdirsyncer.utils.vobject import Item
def test_repair_uids():
s = MemoryStorage()
s.upload(Item(u'BEGIN:VCARD\nEND:VCARD'))
repair_storage(s)
uid, = [s.get(href)[0].uid for href, etag in s.list()]
s.upload(Item(u'BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)))
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 == uid2
repair_storage(s)
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 != uid2
|
Add test for repair command
|
Add test for repair command
|
Python
|
mit
|
hobarrera/vdirsyncer,untitaker/vdirsyncer,tribut/vdirsyncer,credativUK/vdirsyncer,credativUK/vdirsyncer,untitaker/vdirsyncer,untitaker/vdirsyncer,hobarrera/vdirsyncer,tribut/vdirsyncer
|
Add test for repair command
|
from vdirsyncer.cli.utils import repair_storage
from vdirsyncer.storage.memory import MemoryStorage
from vdirsyncer.utils.vobject import Item
def test_repair_uids():
s = MemoryStorage()
s.upload(Item(u'BEGIN:VCARD\nEND:VCARD'))
repair_storage(s)
uid, = [s.get(href)[0].uid for href, etag in s.list()]
s.upload(Item(u'BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)))
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 == uid2
repair_storage(s)
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 != uid2
|
<commit_before><commit_msg>Add test for repair command<commit_after>
|
from vdirsyncer.cli.utils import repair_storage
from vdirsyncer.storage.memory import MemoryStorage
from vdirsyncer.utils.vobject import Item
def test_repair_uids():
s = MemoryStorage()
s.upload(Item(u'BEGIN:VCARD\nEND:VCARD'))
repair_storage(s)
uid, = [s.get(href)[0].uid for href, etag in s.list()]
s.upload(Item(u'BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)))
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 == uid2
repair_storage(s)
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 != uid2
|
Add test for repair commandfrom vdirsyncer.cli.utils import repair_storage
from vdirsyncer.storage.memory import MemoryStorage
from vdirsyncer.utils.vobject import Item
def test_repair_uids():
s = MemoryStorage()
s.upload(Item(u'BEGIN:VCARD\nEND:VCARD'))
repair_storage(s)
uid, = [s.get(href)[0].uid for href, etag in s.list()]
s.upload(Item(u'BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)))
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 == uid2
repair_storage(s)
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 != uid2
|
<commit_before><commit_msg>Add test for repair command<commit_after>from vdirsyncer.cli.utils import repair_storage
from vdirsyncer.storage.memory import MemoryStorage
from vdirsyncer.utils.vobject import Item
def test_repair_uids():
s = MemoryStorage()
s.upload(Item(u'BEGIN:VCARD\nEND:VCARD'))
repair_storage(s)
uid, = [s.get(href)[0].uid for href, etag in s.list()]
s.upload(Item(u'BEGIN:VCARD\nUID:{}\nEND:VCARD'.format(uid)))
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 == uid2
repair_storage(s)
uid1, uid2 = [s.get(href)[0].uid for href, etag in s.list()]
assert uid1 != uid2
|
|
62ebb9b6e0e3ca0ed9e39b016e7b8e75dad92600
|
dataviva/apps/session/login_providers.py
|
dataviva/apps/session/login_providers.py
|
from flask import Flask, redirect, url_for, session, request
from flask_oauth import OAuth
from config import GOOGLE_OAUTH_ID, GOOGLE_OAUTH_SECRET, \
TWITTER_OAUTH_ID, TWITTER_OAUTH_SECRET, \
FACEBOOK_OAUTH_ID, FACEBOOK_OAUTH_SECRET
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_OAUTH_ID,
consumer_secret=GOOGLE_OAUTH_SECRET)
# Use Twitter as example remote application
twitter = oauth.remote_app('twitter',
# unless absolute urls are used to make requests, this will be added
# before all URLs. This is also true for request_token_url and others.
base_url='https://api.twitter.com/1.1/',
# where flask should look for new request tokens
request_token_url='https://api.twitter.com/oauth/request_token',
# where flask should exchange the token with the remote application
access_token_url='https://api.twitter.com/oauth/access_token',
# twitter knows two authorizatiom URLs. /authorize and /authenticate.
# they mostly work the same, but for sign on /authenticate is
# expected because this will give the user a slightly different
# user interface on the twitter side.
authorize_url='https://api.twitter.com/oauth/authenticate',
# the consumer keys from the twitter application registry.
consumer_key=TWITTER_OAUTH_ID,
consumer_secret=TWITTER_OAUTH_SECRET)
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_OAUTH_ID,
consumer_secret=FACEBOOK_OAUTH_SECRET,
request_token_params={'scope': 'email'})
|
Add login providers in session module.
|
Add login providers in session module.
|
Python
|
mit
|
DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site
|
Add login providers in session module.
|
from flask import Flask, redirect, url_for, session, request
from flask_oauth import OAuth
from config import GOOGLE_OAUTH_ID, GOOGLE_OAUTH_SECRET, \
TWITTER_OAUTH_ID, TWITTER_OAUTH_SECRET, \
FACEBOOK_OAUTH_ID, FACEBOOK_OAUTH_SECRET
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_OAUTH_ID,
consumer_secret=GOOGLE_OAUTH_SECRET)
# Use Twitter as example remote application
twitter = oauth.remote_app('twitter',
# unless absolute urls are used to make requests, this will be added
# before all URLs. This is also true for request_token_url and others.
base_url='https://api.twitter.com/1.1/',
# where flask should look for new request tokens
request_token_url='https://api.twitter.com/oauth/request_token',
# where flask should exchange the token with the remote application
access_token_url='https://api.twitter.com/oauth/access_token',
# twitter knows two authorizatiom URLs. /authorize and /authenticate.
# they mostly work the same, but for sign on /authenticate is
# expected because this will give the user a slightly different
# user interface on the twitter side.
authorize_url='https://api.twitter.com/oauth/authenticate',
# the consumer keys from the twitter application registry.
consumer_key=TWITTER_OAUTH_ID,
consumer_secret=TWITTER_OAUTH_SECRET)
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_OAUTH_ID,
consumer_secret=FACEBOOK_OAUTH_SECRET,
request_token_params={'scope': 'email'})
|
<commit_before><commit_msg>Add login providers in session module.<commit_after>
|
from flask import Flask, redirect, url_for, session, request
from flask_oauth import OAuth
from config import GOOGLE_OAUTH_ID, GOOGLE_OAUTH_SECRET, \
TWITTER_OAUTH_ID, TWITTER_OAUTH_SECRET, \
FACEBOOK_OAUTH_ID, FACEBOOK_OAUTH_SECRET
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_OAUTH_ID,
consumer_secret=GOOGLE_OAUTH_SECRET)
# Use Twitter as example remote application
twitter = oauth.remote_app('twitter',
# unless absolute urls are used to make requests, this will be added
# before all URLs. This is also true for request_token_url and others.
base_url='https://api.twitter.com/1.1/',
# where flask should look for new request tokens
request_token_url='https://api.twitter.com/oauth/request_token',
# where flask should exchange the token with the remote application
access_token_url='https://api.twitter.com/oauth/access_token',
# twitter knows two authorizatiom URLs. /authorize and /authenticate.
# they mostly work the same, but for sign on /authenticate is
# expected because this will give the user a slightly different
# user interface on the twitter side.
authorize_url='https://api.twitter.com/oauth/authenticate',
# the consumer keys from the twitter application registry.
consumer_key=TWITTER_OAUTH_ID,
consumer_secret=TWITTER_OAUTH_SECRET)
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_OAUTH_ID,
consumer_secret=FACEBOOK_OAUTH_SECRET,
request_token_params={'scope': 'email'})
|
Add login providers in session module.from flask import Flask, redirect, url_for, session, request
from flask_oauth import OAuth
from config import GOOGLE_OAUTH_ID, GOOGLE_OAUTH_SECRET, \
TWITTER_OAUTH_ID, TWITTER_OAUTH_SECRET, \
FACEBOOK_OAUTH_ID, FACEBOOK_OAUTH_SECRET
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_OAUTH_ID,
consumer_secret=GOOGLE_OAUTH_SECRET)
# Use Twitter as example remote application
twitter = oauth.remote_app('twitter',
# unless absolute urls are used to make requests, this will be added
# before all URLs. This is also true for request_token_url and others.
base_url='https://api.twitter.com/1.1/',
# where flask should look for new request tokens
request_token_url='https://api.twitter.com/oauth/request_token',
# where flask should exchange the token with the remote application
access_token_url='https://api.twitter.com/oauth/access_token',
# twitter knows two authorizatiom URLs. /authorize and /authenticate.
# they mostly work the same, but for sign on /authenticate is
# expected because this will give the user a slightly different
# user interface on the twitter side.
authorize_url='https://api.twitter.com/oauth/authenticate',
# the consumer keys from the twitter application registry.
consumer_key=TWITTER_OAUTH_ID,
consumer_secret=TWITTER_OAUTH_SECRET)
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_OAUTH_ID,
consumer_secret=FACEBOOK_OAUTH_SECRET,
request_token_params={'scope': 'email'})
|
<commit_before><commit_msg>Add login providers in session module.<commit_after>from flask import Flask, redirect, url_for, session, request
from flask_oauth import OAuth
from config import GOOGLE_OAUTH_ID, GOOGLE_OAUTH_SECRET, \
TWITTER_OAUTH_ID, TWITTER_OAUTH_SECRET, \
FACEBOOK_OAUTH_ID, FACEBOOK_OAUTH_SECRET
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_OAUTH_ID,
consumer_secret=GOOGLE_OAUTH_SECRET)
# Use Twitter as example remote application
twitter = oauth.remote_app('twitter',
# unless absolute urls are used to make requests, this will be added
# before all URLs. This is also true for request_token_url and others.
base_url='https://api.twitter.com/1.1/',
# where flask should look for new request tokens
request_token_url='https://api.twitter.com/oauth/request_token',
# where flask should exchange the token with the remote application
access_token_url='https://api.twitter.com/oauth/access_token',
# twitter knows two authorizatiom URLs. /authorize and /authenticate.
# they mostly work the same, but for sign on /authenticate is
# expected because this will give the user a slightly different
# user interface on the twitter side.
authorize_url='https://api.twitter.com/oauth/authenticate',
# the consumer keys from the twitter application registry.
consumer_key=TWITTER_OAUTH_ID,
consumer_secret=TWITTER_OAUTH_SECRET)
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=FACEBOOK_OAUTH_ID,
consumer_secret=FACEBOOK_OAUTH_SECRET,
request_token_params={'scope': 'email'})
|
|
7cbde0dd69aa6c49ab1483e7aa00bf8e141a6adc
|
src/util/extractMahoutRes.py
|
src/util/extractMahoutRes.py
|
import os, sys
from operator import itemgetter
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
# Verify arguments
if len(sys.argv) != 3:
errorExit("Usage: {} MINSUP FILE\n".format(os.path.basename(sys.argv[0])))
minSup = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
results = dict([])
with (open(fileName, 'rt')) as FILE:
for line in FILE:
tokens = line.split("[")
for token in tokens[1:]:
tokens2 = token.split("]")
itemsetStr = tokens2[0]
items = []
for item in itemsetStr.split(","):
items.append(int(item))
itemset = frozenset(items)
supportStr = (tokens2[1].split(","))[1][:-1]
# Handle end of the line
if supportStr[-1] == ")":
supportStr = supportStr[:-1]
support = int(supportStr)
results[itemset] = support
sortedResults = sorted(results.items(), key=itemgetter(1), reverse=True)
for tup in sortedResults:
if tup[1] >= minSup:
itemsetStr = ""
for item in sorted(tup[0]):
itemsetStr += str(item) + " "
itemsetStr = itemsetStr[:-1]
sys.stdout.write(itemsetStr + "\t" + str(tup[1]) + "\n")
else:
break
if __name__ == '__main__':
main()
|
Add a script to extract the results from a Mahout PFPgrowth run
|
Add a script to extract the results from a Mahout PFPgrowth run
|
Python
|
apache-2.0
|
jdebrabant/parallel_arules,jdebrabant/parallel_arules,jdebrabant/parallel_arules,jdebrabant/parallel_arules
|
Add a script to extract the results from a Mahout PFPgrowth run
|
import os, sys
from operator import itemgetter
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
# Verify arguments
if len(sys.argv) != 3:
errorExit("Usage: {} MINSUP FILE\n".format(os.path.basename(sys.argv[0])))
minSup = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
results = dict([])
with (open(fileName, 'rt')) as FILE:
for line in FILE:
tokens = line.split("[")
for token in tokens[1:]:
tokens2 = token.split("]")
itemsetStr = tokens2[0]
items = []
for item in itemsetStr.split(","):
items.append(int(item))
itemset = frozenset(items)
supportStr = (tokens2[1].split(","))[1][:-1]
# Handle end of the line
if supportStr[-1] == ")":
supportStr = supportStr[:-1]
support = int(supportStr)
results[itemset] = support
sortedResults = sorted(results.items(), key=itemgetter(1), reverse=True)
for tup in sortedResults:
if tup[1] >= minSup:
itemsetStr = ""
for item in sorted(tup[0]):
itemsetStr += str(item) + " "
itemsetStr = itemsetStr[:-1]
sys.stdout.write(itemsetStr + "\t" + str(tup[1]) + "\n")
else:
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to extract the results from a Mahout PFPgrowth run<commit_after>
|
import os, sys
from operator import itemgetter
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
# Verify arguments
if len(sys.argv) != 3:
errorExit("Usage: {} MINSUP FILE\n".format(os.path.basename(sys.argv[0])))
minSup = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
results = dict([])
with (open(fileName, 'rt')) as FILE:
for line in FILE:
tokens = line.split("[")
for token in tokens[1:]:
tokens2 = token.split("]")
itemsetStr = tokens2[0]
items = []
for item in itemsetStr.split(","):
items.append(int(item))
itemset = frozenset(items)
supportStr = (tokens2[1].split(","))[1][:-1]
# Handle end of the line
if supportStr[-1] == ")":
supportStr = supportStr[:-1]
support = int(supportStr)
results[itemset] = support
sortedResults = sorted(results.items(), key=itemgetter(1), reverse=True)
for tup in sortedResults:
if tup[1] >= minSup:
itemsetStr = ""
for item in sorted(tup[0]):
itemsetStr += str(item) + " "
itemsetStr = itemsetStr[:-1]
sys.stdout.write(itemsetStr + "\t" + str(tup[1]) + "\n")
else:
break
if __name__ == '__main__':
main()
|
Add a script to extract the results from a Mahout PFPgrowth runimport os, sys
from operator import itemgetter
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
# Verify arguments
if len(sys.argv) != 3:
errorExit("Usage: {} MINSUP FILE\n".format(os.path.basename(sys.argv[0])))
minSup = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
results = dict([])
with (open(fileName, 'rt')) as FILE:
for line in FILE:
tokens = line.split("[")
for token in tokens[1:]:
tokens2 = token.split("]")
itemsetStr = tokens2[0]
items = []
for item in itemsetStr.split(","):
items.append(int(item))
itemset = frozenset(items)
supportStr = (tokens2[1].split(","))[1][:-1]
# Handle end of the line
if supportStr[-1] == ")":
supportStr = supportStr[:-1]
support = int(supportStr)
results[itemset] = support
sortedResults = sorted(results.items(), key=itemgetter(1), reverse=True)
for tup in sortedResults:
if tup[1] >= minSup:
itemsetStr = ""
for item in sorted(tup[0]):
itemsetStr += str(item) + " "
itemsetStr = itemsetStr[:-1]
sys.stdout.write(itemsetStr + "\t" + str(tup[1]) + "\n")
else:
break
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to extract the results from a Mahout PFPgrowth run<commit_after>import os, sys
from operator import itemgetter
def errorExit(msg):
sys.stderr.write(msg)
sys.exit(1)
def main():
# Verify arguments
if len(sys.argv) != 3:
errorExit("Usage: {} MINSUP FILE\n".format(os.path.basename(sys.argv[0])))
minSup = int(sys.argv[1])
fileName = sys.argv[2]
if not os.path.isfile(fileName):
errorExit("{} does not exist, or is not a file\n".format(fileName))
results = dict([])
with (open(fileName, 'rt')) as FILE:
for line in FILE:
tokens = line.split("[")
for token in tokens[1:]:
tokens2 = token.split("]")
itemsetStr = tokens2[0]
items = []
for item in itemsetStr.split(","):
items.append(int(item))
itemset = frozenset(items)
supportStr = (tokens2[1].split(","))[1][:-1]
# Handle end of the line
if supportStr[-1] == ")":
supportStr = supportStr[:-1]
support = int(supportStr)
results[itemset] = support
sortedResults = sorted(results.items(), key=itemgetter(1), reverse=True)
for tup in sortedResults:
if tup[1] >= minSup:
itemsetStr = ""
for item in sorted(tup[0]):
itemsetStr += str(item) + " "
itemsetStr = itemsetStr[:-1]
sys.stdout.write(itemsetStr + "\t" + str(tup[1]) + "\n")
else:
break
if __name__ == '__main__':
main()
|
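For context, the split("[") / split("]") logic above walks Mahout's textual frequent-pattern dump, where each line carries a key followed by ([items],support) groups. A self-contained sketch of that round trip (the sample line is hypothetical; only the bracketed groups matter to the parser):
sample = "3\t([3,5],10),([5],12)"  # invented line in the assumed dump format
results = {}
for token in sample.split("[")[1:]:
    parts = token.split("]")
    itemset = frozenset(int(item) for item in parts[0].split(","))
    support_str = parts[1].split(",")[1][:-1]
    if support_str.endswith(")"):  # same end-of-line guard as the script above
        support_str = support_str[:-1]
    results[itemset] = int(support_str)
print(results)  # {frozenset({3, 5}): 10, frozenset({5}): 12}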
|
eab9a6add303d06ac617e0cb219e99600b14e54f
|
utils/parse_registration.py
|
utils/parse_registration.py
|
#!/usr/bin/env python3
import csv
import sys
# For this function to work, the attendance list needs to include the exact fields as described
# below. Otherwise this function won't be able to confirm that the ballot came from the correct
# person (or that the person even registered).
def confirm_id(id, name, org, attendance_list):
#print("MATCH ID for " + name + ", " + org);
for entry in iter(attendance_list):
#print(entry);
if (id == entry['UUID'] and
name == entry['What is your name?'] and
org == entry['What organization will you be representing?']):
return 1;
return 0;
def main():
registration_file=sys.argv[1]; # File with registrations from Google Form
ballots=[];
votes={};
ballot_dict={};
print("Opening file...");
registration_list = list(csv.DictReader(open(registration_file)));
writer = csv.DictWriter(open('attendance.csv', 'w', newline=''), ['name','org','attend'], quoting = csv.QUOTE_ALL);
writer.writeheader();
print("Writing attendance.csv...");
names = {};
for registration in iter(registration_list):
name = registration['What is your name?'];
org = registration['What organization will you be representing?'];
if name in names:
print("Omitting duplicate name: " + name);
continue;
else:
names[name] = 1;
writer.writerow({'name': name, 'org': org, 'attend': '1'});
print("\n=====\n");
print("Move attendance.csv to the appropriate folder.");
if __name__ == '__main__':
main()
|
Add script to parse registration CSV
|
Add script to parse registration CSV
|
Python
|
mit
|
mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io
|
Add script to parse registration CSV
|
#!/usr/bin/env python3
import csv
import sys
# For this function to work, the attendance list needs to include the exact fields as described
# below. Otherwise this function won't be able to confirm that the ballot came from the correct
# person (or that the person even registered).
def confirm_id(id, name, org, attendance_list):
#print("MATCH ID for " + name + ", " + org);
for entry in iter(attendance_list):
#print(entry);
if (id == entry['UUID'] and
name == entry['What is your name?'] and
org == entry['What organization will you be representing?']):
return 1;
return 0;
def main():
registration_file=sys.argv[1]; # File with registrations from Google Form
ballots=[];
votes={};
ballot_dict={};
print("Opening file...");
registration_list = list(csv.DictReader(open(registration_file)));
writer = csv.DictWriter(open('attendance.csv', 'w', newline=''), ['name','org','attend'], quoting = csv.QUOTE_ALL);
writer.writeheader();
print("Writing attendance.csv...");
names = {};
for registration in iter(registration_list):
name = registration['What is your name?'];
org = registration['What organization will you be representing?'];
if name in names:
print("Omitting duplicate name: " + name);
continue;
else:
names[name] = 1;
writer.writerow({'name': name, 'org': org, 'attend': '1'});
print("\n=====\n");
print("Move attendance.csv to the appropriate folder.");
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to parse registration CSV<commit_after>
|
#!/usr/bin/env python3
import csv
import sys
# For this function to work, the attendance list needs to include the exact fields as described
# below. Otherwise this function won't be able to confirm that the ballot came from the correct
# person (or that the person even registered).
def confirm_id(id, name, org, attendance_list):
#print("MATCH ID for " + name + ", " + org);
for entry in iter(attendance_list):
#print(entry);
if (id == entry['UUID'] and
name == entry['What is your name?'] and
org == entry['What organization will you be representing?']):
return 1;
return 0;
def main():
registration_file=sys.argv[1]; # File with registrations from Google Form
ballots=[];
votes={};
ballot_dict={};
print("Opening file...");
registration_list = list(csv.DictReader(open(registration_file)));
writer = csv.DictWriter(open('attendance.csv', 'w', newline=''), ['name','org','attend'], quoting = csv.QUOTE_ALL);
writer.writeheader();
print("Writing attendance.csv...");
names = {};
for registration in iter(registration_list):
name = registration['What is your name?'];
org = registration['What organization will you be representing?'];
if name in names:
print("Omitting duplicate name: " + name);
continue;
else:
names[name] = 1;
writer.writerow({'name': name, 'org': org, 'attend': '1'});
print("\n=====\n");
print("Move attendance.csv to the appropriate folder.");
if __name__ == '__main__':
main()
|
Add script to parse registration CSV#!/usr/bin/env python3
import csv
import sys
# For this function to work, the attendance list needs to include the exact fields as described
# below. Otherwise this function won't be able to confirm that the ballot came from the correct
# person (or that the person even registered).
def confirm_id(id, name, org, attendance_list):
#print("MATCH ID for " + name + ", " + org);
for entry in iter(attendance_list):
#print(entry);
if (id == entry['UUID'] and
name == entry['What is your name?'] and
org == entry['What organization will you be representing?']):
return 1;
return 0;
def main():
registration_file=sys.argv[1]; # File with registrations from Google Form
ballots=[];
votes={};
ballot_dict={};
print("Opening file...");
registration_list = list(csv.DictReader(open(registration_file)));
writer = csv.DictWriter(open('attendance.csv', 'w', newline=''), ['name','org','attend'], quoting = csv.QUOTE_ALL);
writer.writeheader();
print("Writing attendance.csv...");
names = {};
for registration in iter(registration_list):
name = registration['What is your name?'];
org = registration['What organization will you be representing?'];
if name in names:
print("Omitting duplicate name: " + name);
continue;
else:
names[name] = 1;
writer.writerow({'name': name, 'org': org, 'attend': '1'});
print("\n=====\n");
print("Move attendance.csv to the appropriate folder.");
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to parse registration CSV<commit_after>#!/usr/bin/env python3
import csv
import sys
# For this function to work, the attendance list needs to include the exact fields as described
# below. Otherwise this function won't be able to confirm that the ballot came from the correct
# person (or that the person even registered).
def confirm_id(id, name, org, attendance_list):
#print("MATCH ID for " + name + ", " + org);
for entry in iter(attendance_list):
#print(entry);
if (id == entry['UUID'] and
name == entry['What is your name?'] and
org == entry['What organization will you be representing?']):
return 1;
return 0;
def main():
registration_file=sys.argv[1]; # File with registrations from Google Form
ballots=[];
votes={};
ballot_dict={};
print("Opening file...");
registration_list = list(csv.DictReader(open(registration_file)));
writer = csv.DictWriter(open('attendance.csv', 'w', newline=''), ['name','org','attend'], quoting = csv.QUOTE_ALL);
writer.writeheader();
print("Writing attendance.csv...");
names = {};
for registration in iter(registration_list):
name = registration['What is your name?'];
org = registration['What organization will you be representing?'];
if name in names:
print("Omitting duplicate name: " + name);
continue;
else:
names[name] = 1;
writer.writerow({'name': name, 'org': org, 'attend': '1'});
print("\n=====\n");
print("Move attendance.csv to the appropriate folder.");
if __name__ == '__main__':
main()
|
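The confirm_id helper above matches a ballot against the exact Google Form column names; a small illustrative call (all values invented) looks like this, assuming the function definition above is in scope:
attendance_list = [
    {
        "UUID": "1234-abcd",
        "What is your name?": "Ada Lovelace",
        "What organization will you be representing?": "Analytical Engines Inc.",
    },
]
# UUID + name + organization must all match one registration row -> 1, else 0.
print(confirm_id("1234-abcd", "Ada Lovelace", "Analytical Engines Inc.", attendance_list))  # 1
print(confirm_id("9999-zzzz", "Ada Lovelace", "Analytical Engines Inc.", attendance_list))  # 0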
|
6328f5f4bbfa3c20f429f9fa648258cde54ac178
|
cthulhubot/management/commands/restart_masters.py
|
cthulhubot/management/commands/restart_masters.py
|
from django.core.management.base import BaseCommand
from cthulhubot.models import Buildmaster
class Command(BaseCommand):
help = 'Restart all Buildmaster processes'
args = ""
def handle(self, *fixture_labels, **options):
verbosity = int(options.get('verbosity', 1))
commit = int(options.get('commit', 1))
if verbosity > 1:
print 'Restarting buildmasters...'
for b in Buildmaster.objects.all():
if verbosity > 1:
print 'Handling buildmaster %s for project %s' % (str(b.id), str(b.project.name))
try:
b.stop()
except:
print 'Failed to stop master'
try:
b.start()
except:
print 'Failed to start master'
|
Add ability to restart masters from console
|
Add ability to restart masters from console
|
Python
|
bsd-3-clause
|
centrumholdings/cthulhubot
|
Add ability to restart masters from console
|
from django.core.management.base import BaseCommand
from cthulhubot.models import Buildmaster
class Command(BaseCommand):
help = 'Restart all Buildmaster processes'
args = ""
def handle(self, *fixture_labels, **options):
verbosity = int(options.get('verbosity', 1))
commit = int(options.get('commit', 1))
if verbosity > 1:
print 'Restarting buildmasters...'
for b in Buildmaster.objects.all():
if verbosity > 1:
print 'Handling buildmaster %s for project %s' % (str(b.id), str(b.project.name))
try:
b.stop()
except:
print 'Failed to stop master'
try:
b.start()
except:
print 'Failed to start master'
|
<commit_before><commit_msg>Add ability to restart masters from console<commit_after>
|
from django.core.management.base import BaseCommand
from cthulhubot.models import Buildmaster
class Command(BaseCommand):
help = 'Restart all Buildmaster processes'
args = ""
def handle(self, *fixture_labels, **options):
verbosity = int(options.get('verbosity', 1))
commit = int(options.get('commit', 1))
if verbosity > 1:
print 'Restarting buildmasters...'
for b in Buildmaster.objects.all():
if verbosity > 1:
print 'Handling buildmaster %s for project %s' % (str(b.id), str(b.project.name))
try:
b.stop()
except:
print 'Failed to stop master'
try:
b.start()
except:
print 'Failed to start master'
|
Add ability to restart masters from console
from django.core.management.base import BaseCommand
from cthulhubot.models import Buildmaster
class Command(BaseCommand):
help = 'Restart all Buildmaster processes'
args = ""
def handle(self, *fixture_labels, **options):
verbosity = int(options.get('verbosity', 1))
commit = int(options.get('commit', 1))
if verbosity > 1:
print 'Restarting buildmasters...'
for b in Buildmaster.objects.all():
if verbosity > 1:
print 'Handling buildmaster %s for project %s' % (str(b.id), str(b.project.name))
try:
b.stop()
except:
print 'Failed to stop master'
try:
b.start()
except:
print 'Failed to start master'
|
<commit_before><commit_msg>Add ability to restart masters from console<commit_after>from django.core.management.base import BaseCommand
from cthulhubot.models import Buildmaster
class Command(BaseCommand):
help = 'Restart all Buildmaster processes'
args = ""
def handle(self, *fixture_labels, **options):
verbosity = int(options.get('verbosity', 1))
commit = int(options.get('commit', 1))
if verbosity > 1:
print 'Restarting buildmasters...'
for b in Buildmaster.objects.all():
if verbosity > 1:
print 'Handling buildmaster %s for project %s' % (str(b.id), str(b.project.name))
try:
b.stop()
except:
print 'Failed to stop master'
try:
b.start()
except:
print 'Failed to start master'
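As a hedged usage note, the command above would normally be triggered through Django's management machinery; a sketch, assuming the cthulhubot app is in INSTALLED_APPS:
# equivalent to: python manage.py restart_masters --verbosity=2
from django.core.management import call_command
call_command('restart_masters', verbosity=2)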
|
|
7400c4d7719d2df46674d8604a535b4c589150ba
|
tests/health_checks/test_per_ld_snp_AND_gwas_snp.py
|
tests/health_checks/test_per_ld_snp_AND_gwas_snp.py
|
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerLdSnpANDGwasSnp(TestPostgapBase):
def setUp(self):
self.per_ld_snp_and_gwas_snp = self.pg.groupby(['ld_snp_rsID', 'gwas_snp'])
def test_each_ld_snp_rsID_and_gwas_snp_pair_has_unique_r2(self):
self.assert_groupby_series_is_unique_per_group(
self.per_ld_snp_and_gwas_snp.r2
)
if __name__ == '__main__':
unittest.main()
|
Add test per ld snp and gwas snp
|
Add test per ld snp and gwas snp
|
Python
|
apache-2.0
|
Ensembl/cttv024,Ensembl/cttv024
|
Add test per ld snp and gwas snp
|
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerLdSnpANDGwasSnp(TestPostgapBase):
def setUp(self):
self.per_ld_snp_and_gwas_snp = self.pg.groupby(['ld_snp_rsID', 'gwas_snp'])
def test_each_ld_snp_rsID_and_gwas_snp_pair_has_unique_r2(self):
self.assert_groupby_series_is_unique_per_group(
self.per_ld_snp_and_gwas_snp.r2
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test per ld snp and gwas snp<commit_after>
|
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerLdSnpANDGwasSnp(TestPostgapBase):
def setUp(self):
self.per_ld_snp_and_gwas_snp = self.pg.groupby(['ld_snp_rsID', 'gwas_snp'])
def test_each_ld_snp_rsID_and_gwas_snp_pair_has_unique_r2(self):
self.assert_groupby_series_is_unique_per_group(
self.per_ld_snp_and_gwas_snp.r2
)
if __name__ == '__main__':
unittest.main()
|
Add test per ld snp and gwas snp
# ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerLdSnpANDGwasSnp(TestPostgapBase):
def setUp(self):
self.per_ld_snp_and_gwas_snp = self.pg.groupby(['ld_snp_rsID', 'gwas_snp'])
def test_each_ld_snp_rsID_and_gwas_snp_pair_has_unique_r2(self):
self.assert_groupby_series_is_unique_per_group(
self.per_ld_snp_and_gwas_snp.r2
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test per ld snp and gwas snp<commit_after># ------------------------------------------------
# built-ins
import unittest
# local
from .base import TestPostgapBase
# ------------------------------------------------
class TestPostgapPerLdSnpANDGwasSnp(TestPostgapBase):
def setUp(self):
self.per_ld_snp_and_gwas_snp = self.pg.groupby(['ld_snp_rsID', 'gwas_snp'])
def test_each_ld_snp_rsID_and_gwas_snp_pair_has_unique_r2(self):
self.assert_groupby_series_is_unique_per_group(
self.per_ld_snp_and_gwas_snp.r2
)
if __name__ == '__main__':
unittest.main()
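A hedged note on running it, assuming the tests and health_checks packages are importable from the repository root and the TestPostgapBase fixture can reach its data:
import unittest
from tests.health_checks.test_per_ld_snp_AND_gwas_snp import TestPostgapPerLdSnpANDGwasSnp
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestPostgapPerLdSnpANDGwasSnp)
unittest.TextTestRunner(verbosity=2).run(suite)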
|
|
e198853e9347b908a1ea9880e4eb4433e49e140b
|
DeployUtil/toolsession.py
|
DeployUtil/toolsession.py
|
import urllib.request
import ssl
def create_toolsess_httpsHandler():
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
return https_handler
|
Refactor out the SSL handler.
|
Refactor out the SSL handler.
|
Python
|
mit
|
loarabia/DeployUtil
|
Refactor out the SSL handler.
|
import urllib.request
import ssl
def create_toolsess_httpsHandler():
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
return https_handler
|
<commit_before><commit_msg>Refactor out the SSL handler.<commit_after>
|
import urllib.request
import ssl
def create_toolsess_httpsHandler():
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
return https_handler
|
Refactor out the SSL handler.
import urllib.request
import ssl
def create_toolsess_httpsHandler():
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
return https_handler
|
<commit_before><commit_msg>Refactor out the SSL handler.<commit_after>import urllib.request
import ssl
def create_toolsess_httpsHandler():
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
https_handler = urllib.request.HTTPSHandler(context=context)
return https_handler
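A hedged usage sketch: the handler is simply installed into a urllib opener; the endpoint URL below is hypothetical:
import urllib.request
opener = urllib.request.build_opener(create_toolsess_httpsHandler())
response = opener.open('https://device.example.local/api/status')  # hypothetical endpoint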
|
|
cc41e1ae4ff7055d70d15cb9783935405158a300
|
gchart.py
|
gchart.py
|
# Google Chart wrapper for iampl.
from IPython.core.display import Javascript
def GeoChart(keys, data, **kwargs):
table = "{},\n".format(keys)
for i in data:
table += "['{}', {}],\n".format(i, data[i])
options = ""
for arg, value in kwargs.iteritems():
if isinstance(value, bool):
value = 'true' if value else 'false'
elif isinstance(value, dict):
items = ""
for k, v in value.iteritems():
items += "{}: {},".format(k, v)
value = "{{{}}}".format(items)
options += "{}:{},\n".format(arg, value)
return Javascript("""
container.show();
function draw() {{
var chart = new google.visualization.GeoChart(element[0]);
chart.draw(google.visualization.arrayToDataTable([{}]), {{{}}});
}}
google.load('visualization', '1.0', {{'callback': draw, 'packages':['geochart']}});
""".format(table, options), lib="https://www.google.com/jsapi")
|
Add a wrapper to GeoChart
|
Add a wrapper to GeoChart
|
Python
|
bsd-2-clause
|
vitaut/iampl
|
Add a wrapper to GeoChart
|
# Google Chart wrapper for iampl.
from IPython.core.display import Javascript
def GeoChart(keys, data, **kwargs):
table = "{},\n".format(keys)
for i in data:
table += "['{}', {}],\n".format(i, data[i])
options = ""
for arg, value in kwargs.iteritems():
if isinstance(value, bool):
value = 'true' if value else 'false'
elif isinstance(value, dict):
items = ""
for k, v in value.iteritems():
items += "{}: {},".format(k, v)
value = "{{{}}}".format(items)
options += "{}:{},\n".format(arg, value)
return Javascript("""
container.show();
function draw() {{
var chart = new google.visualization.GeoChart(element[0]);
chart.draw(google.visualization.arrayToDataTable([{}]), {{{}}});
}}
google.load('visualization', '1.0', {{'callback': draw, 'packages':['geochart']}});
""".format(table, options), lib="https://www.google.com/jsapi")
|
<commit_before><commit_msg>Add a wrapper to GeoChart<commit_after>
|
# Google Chart wrapper for iampl.
from IPython.core.display import Javascript
def GeoChart(keys, data, **kwargs):
table = "{},\n".format(keys)
for i in data:
table += "['{}', {}],\n".format(i, data[i])
options = ""
for arg, value in kwargs.iteritems():
if isinstance(value, bool):
value = 'true' if value else 'false'
elif isinstance(value, dict):
items = ""
for k, v in value.iteritems():
items += "{}: {},".format(k, v)
value = "{{{}}}".format(items)
options += "{}:{},\n".format(arg, value)
return Javascript("""
container.show();
function draw() {{
var chart = new google.visualization.GeoChart(element[0]);
chart.draw(google.visualization.arrayToDataTable([{}]), {{{}}});
}}
google.load('visualization', '1.0', {{'callback': draw, 'packages':['geochart']}});
""".format(table, options), lib="https://www.google.com/jsapi")
|
Add a wrapper to GeoChart
# Google Chart wrapper for iampl.
from IPython.core.display import Javascript
def GeoChart(keys, data, **kwargs):
table = "{},\n".format(keys)
for i in data:
table += "['{}', {}],\n".format(i, data[i])
options = ""
for arg, value in kwargs.iteritems():
if isinstance(value, bool):
value = 'true' if value else 'false'
elif isinstance(value, dict):
items = ""
for k, v in value.iteritems():
items += "{}: {},".format(k, v)
value = "{{{}}}".format(items)
options += "{}:{},\n".format(arg, value)
return Javascript("""
container.show();
function draw() {{
var chart = new google.visualization.GeoChart(element[0]);
chart.draw(google.visualization.arrayToDataTable([{}]), {{{}}});
}}
google.load('visualization', '1.0', {{'callback': draw, 'packages':['geochart']}});
""".format(table, options), lib="https://www.google.com/jsapi")
|
<commit_before><commit_msg>Add a wrapper to GeoChart<commit_after># Google Chart wrapper for iampl.
from IPython.core.display import Javascript
def GeoChart(keys, data, **kwargs):
table = "{},\n".format(keys)
for i in data:
table += "['{}', {}],\n".format(i, data[i])
options = ""
for arg, value in kwargs.iteritems():
if isinstance(value, bool):
value = 'true' if value else 'false'
elif isinstance(value, dict):
items = ""
for k, v in value.iteritems():
items += "{}: {},".format(k, v)
value = "{{{}}}".format(items)
options += "{}:{},\n".format(arg, value)
return Javascript("""
container.show();
function draw() {{
var chart = new google.visualization.GeoChart(element[0]);
chart.draw(google.visualization.arrayToDataTable([{}]), {{{}}});
}}
google.load('visualization', '1.0', {{'callback': draw, 'packages':['geochart']}});
""".format(table, options), lib="https://www.google.com/jsapi")
|
|
7bbf2f7f9f7c7c49287519207e56932e28061514
|
nova/tests/virt_unittest.py
|
nova/tests/virt_unittest.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add a few unit tests for libvirt_conn.
|
Add a few unit tests for libvirt_conn.
|
Python
|
apache-2.0
|
alexandrucoman/vbox-nova-driver,bclau/nova,phenoxim/nova,KarimAllah/nova,Juniper/nova,dawnpower/nova,Triv90/Nova,mikalstill/nova,yosshy/nova,sacharya/nova,ted-gould/nova,Stavitsky/nova,nikesh-mahalka/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,houshengbo/nova_vmware_compute_driver,yatinkumbhare/openstack-nova,eayunstack/nova,saleemjaveds/https-github.com-openstack-nova,petrutlucian94/nova_dev,belmiromoreira/nova,openstack/nova,vmturbo/nova,cyx1231st/nova,cloudbase/nova-virtualbox,paulmathews/nova,gooddata/openstack-nova,fnordahl/nova,usc-isi/nova,zzicewind/nova,maoy/zknova,josephsuh/extra-specs,shahar-stratoscale/nova,fajoy/nova,ruslanloman/nova,watonyweng/nova,Tehsmash/nova,badock/nova,devendermishrajio/nova_test_latest,tudorvio/nova,barnsnake351/nova,mgagne/nova,Triv90/Nova,Juniper/nova,CloudServer/nova,edulramirez/nova,jeffrey4l/nova,CloudServer/nova,Juniper/nova,akash1808/nova,cyx1231st/nova,KarimAllah/nova,Metaswitch/calico-nova,BeyondTheClouds/nova,kimjaejoong/nova,sridevikoushik31/nova,NoBodyCam/TftpPxeBootBareMetal,shootstar/novatest,sebrandon1/nova,apporc/nova,virtualopensystems/nova,affo/nova,DirectXMan12/nova-hacking,zzicewind/nova,scripnichenko/nova,NoBodyCam/TftpPxeBootBareMetal,iuliat/nova,eonpatapon/nova,raildo/nova,petrutlucian94/nova,maheshp/novatest,psiwczak/openstack,Metaswitch/calico-nova,thomasem/nova,gspilio/nova,termie/pupa,psiwczak/openstack,devendermishrajio/nova,Yusuke1987/openstack_template,JianyuWang/nova,eneabio/nova,cloudbase/nova-virtualbox,CCI-MOC/nova,yrobla/nova,maoy/zknova,viggates/nova,fajoy/nova,gooddata/openstack-nova,NoBodyCam/TftpPxeBootBareMetal,cloudbase/nova,projectcalico/calico-nova,gspilio/nova,dims/nova,jeffrey4l/nova,plumgrid/plumgrid-nova,dawnpower/nova,angdraug/nova,usc-isi/extra-specs,gspilio/nova,Yuriy-Leonov/nova,rrader/nova-docker-plugin,salv-orlando/MyRepo,russellb/nova,tealover/nova,varunarya10/nova_test_latest,double12gzh/nova,vladikr/nova_drafts,ewindisch/nova,termie/nova-migration-demo,maheshp/novatest,petrutlucian94/nova,sileht/deb-openstack-nova,superstack/nova,tianweizhang/nova,salv-orlando/MyRepo,mahak/nova,sridevikoushik31/nova,cloudbase/nova,rickerc/nova_audit,bgxavier/nova,aristanetworks/arista-ovs-nova,termie/nova-migration-demo,DirectXMan12/nova-hacking,Brocade-OpenSource/OpenStack-DNRM-Nova,jianghuaw/nova,CiscoSystems/nova,SUSE-Cloud/nova,takeshineshiro/nova,rajalokan/nova,yosshy/nova,shail2810/nova,joker946/nova,Yusuke1987/openstack_template,dstroppa/openstack-smartos-nova-grizzly,spring-week-topos/nova-week,whitepages/nova,eneabio/nova,savi-dev/nova,jianghuaw/nova,ruslanloman/nova,felixma/nova,mgagne/nova,paulmathews/nova,leilihh/nova,fnordahl/nova,bclau/nova,silenceli/nova,NewpTone/stacklab-nova,superstack/nova,tudorvio/nova,jianghuaw/nova,Stavitsky/nova,eayunstack/nova,CiscoSystems/nova,cernops/nova,anotherjesse/nova,mahak/nova,ted-gould/nova,tianweizhang/nova,citrix-openstack-build/nova,tangfeixiong/nova,josephsuh/extra-specs,cloudbau/nova,dims/nova,KarimAllah/nova,zaina/nova,cloudbase/nova,berrange/nova,Francis-Liu/animated-broccoli,sileht/deb-openstack-nova,OpenAcademy-OpenStack/nova-scheduler,petrutlucian94/nova_dev,JioCloud/nova,shail2810/nova,badock/nova,LoHChina/nova,DirectXMan12/nova-hacking,maelnor/nova,usc-isi/extra-specs,fajoy/nova,scripnichenko/nova,blueboxgroup/nova,Tehsmash/nova,apporc/nova,rajalokan/nova,TieWei/nova,openstack/nova,russellb/nova,imsplitbit/nova,double12gzh/nova,ntt-sic/nova,NewpTone/stacklab-nova,dstroppa/openstack-smartos-nova-grizzly,TwinkleChawla/nova,maoy/zknova,hanlind/nova,anotherjes
se/nova,eharney/nova,projectcalico/calico-nova,termie/pupa,josephsuh/extra-specs,bigswitch/nova,leilihh/novaha,virtualopensystems/nova,kimjaejoong/nova,CEG-FYP-OpenStack/scheduler,gooddata/openstack-nova,houshengbo/nova_vmware_compute_driver,ntt-sic/nova,imsplitbit/nova,paulmathews/nova,Yuriy-Leonov/nova,termie/pupa,BeyondTheClouds/nova,TieWei/nova,eonpatapon/nova,watonyweng/nova,mikalstill/nova,mandeepdhami/nova,sebrandon1/nova,SUSE-Cloud/nova,berrange/nova,psiwczak/openstack,akash1808/nova_test_latest,yrobla/nova,maelnor/nova,tanglei528/nova,Francis-Liu/animated-broccoli,shahar-stratoscale/nova,angdraug/nova,MountainWei/nova,alvarolopez/nova,russellb/nova,sridevikoushik31/openstack,belmiromoreira/nova,tealover/nova,aristanetworks/arista-ovs-nova,devendermishrajio/nova_test_latest,adelina-t/nova,orbitfp7/nova,varunarya10/nova_test_latest,houshengbo/nova_vmware_compute_driver,klmitch/nova,alaski/nova,mmnelemane/nova,raildo/nova,sridevikoushik31/nova,vmturbo/nova,klmitch/nova,usc-isi/nova,tanglei528/nova,devoid/nova,rickerc/nova_audit,JioCloud/nova_test_latest,savi-dev/nova,JioCloud/nova_test_latest,alexandrucoman/vbox-nova-driver,isyippee/nova,thomasem/nova,JioCloud/nova,zaina/nova,BeyondTheClouds/nova,TwinkleChawla/nova,sebrandon1/nova,Triv90/Nova,redhat-openstack/nova,affo/nova,devoid/nova,gooddata/openstack-nova,leilihh/nova,luogangyi/bcec-nova,rahulunair/nova,citrix-openstack-build/nova,saleemjaveds/https-github.com-openstack-nova,zhimin711/nova,superstack/nova,leilihh/novaha,eneabio/nova,aristanetworks/arista-ovs-nova,klmitch/nova,viggates/nova,j-carpentier/nova,mahak/nova,adelina-t/nova,CCI-MOC/nova,vmturbo/nova,noironetworks/nova,bigswitch/nova,rrader/nova-docker-plugin,silenceli/nova,yatinkumbhare/openstack-nova,nikesh-mahalka/nova,shootstar/novatest,hanlind/nova,akash1808/nova,qwefi/nova,termie/nova-migration-demo,rajalokan/nova,NeCTAR-RC/nova,jianghuaw/nova,sridevikoushik31/nova,yrobla/nova,zhimin711/nova,rajalokan/nova,j-carpentier/nova,spring-week-topos/nova-week,hanlind/nova,eharney/nova,tangfeixiong/nova,vmturbo/nova,isyippee/nova,sileht/deb-openstack-nova,orbitfp7/nova,openstack/nova,rahulunair/nova,CEG-FYP-OpenStack/scheduler,sacharya/nova,usc-isi/extra-specs,OpenAcademy-OpenStack/nova-scheduler,felixma/nova,barnsnake351/nova,cernops/nova,klmitch/nova,plumgrid/plumgrid-nova,sridevikoushik31/openstack,phenoxim/nova,MountainWei/nova,NeCTAR-RC/nova,whitepages/nova,NewpTone/stacklab-nova,joker946/nova,devendermishrajio/nova,savi-dev/nova,alaski/nova,bgxavier/nova,vladikr/nova_drafts,noironetworks/nova,JianyuWang/nova,ewindisch/nova,takeshineshiro/nova,mikalstill/nova,mandeepdhami/nova,dstroppa/openstack-smartos-nova-grizzly,akash1808/nova_test_latest,blueboxgroup/nova,sridevikoushik31/openstack,usc-isi/nova,redhat-openstack/nova,luogangyi/bcec-nova,anotherjesse/nova,LoHChina/nova,qwefi/nova,cloudbau/nova,mmnelemane/nova,iuliat/nova,salv-orlando/MyRepo,Juniper/nova,edulramirez/nova,cernops/nova,rahulunair/nova,maheshp/novatest,alvarolopez/nova
|
Add a few unit tests for libvirt_conn.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add a few unit tests for libvirt_conn.<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
Add a few unit tests for libvirt_conn.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
<commit_before><commit_msg>Add a few unit tests for libvirt_conn.<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import test
from nova.virt import libvirt_conn
FLAGS = flags.FLAGS
class LibvirtConnTestCase(test.TrialTestCase):
def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)
for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
# implementation doesn't fiddle around with the FLAGS.
testuri = 'something completely different'
FLAGS.libvirt_uri = testuri
for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
conn = libvirt_conn.LibvirtConnection(True)
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)
|
|
3359263aae0c30e1386f121f77a38c4d71d63c06
|
yatsm/vegetation_indices.py
|
yatsm/vegetation_indices.py
|
""" Functions for computing vegetation indices
"""
from __future__ import division
def EVI(red, nir, blue):
""" Return the Enhanced Vegetation Index for a set of np.ndarrays
EVI is calculated as:
.. math::
2.5 * \\frac{(NIR - RED)}{(NIR + C_1 * RED - C_2 * BLUE + L)}
where:
- :math:`RED` is the red band
- :math:`NIR` is the near infrared band
- :math:`BLUE` is the blue band
- :math:`C_1 = 6`
- :math:`C_2 = 7.5`
- :math:`L = 1`
Note: bands must be given in float datatype from [0, 1]
Args:
red (np.ndarray): red band
nir (np.ndarray): NIR band
blue (np.ndarray): blue band
Returns:
np.ndarray: EVI
"""
return 2.5 * (nir - red) / (nir + 6 * red - 7.5 * blue + 1)
|
Create new module for calculation of veg indices; add EVI
|
Create new module for calculation of veg indices; add EVI
|
Python
|
mit
|
c11/yatsm,valpasq/yatsm,ceholden/yatsm,jmorton/yatsm,jmorton/yatsm,ceholden/yatsm,jmorton/yatsm,c11/yatsm,valpasq/yatsm
|
Create new module for calculation of veg indices; add EVI
|
""" Functions for computing vegetation indices
"""
from __future__ import division
def EVI(red, nir, blue):
""" Return the Enhanced Vegetation Index for a set of np.ndarrays
EVI is calculated as:
.. math::
2.5 * \\frac{(NIR - RED)}{(NIR + C_1 * RED - C_2 * BLUE + L)}
where:
- :math:`RED` is the red band
- :math:`NIR` is the near infrared band
- :math:`BLUE` is the blue band
- :math:`C_1 = 6`
- :math:`C_2 = 7.5`
- :math:`L = 1`
Note: bands must be given in float datatype from [0, 1]
Args:
red (np.ndarray): red band
nir (np.ndarray): NIR band
blue (np.ndarray): blue band
Returns:
np.ndarray: EVI
"""
return 2.5 * (nir - red) / (nir + 6 * red - 7.5 * blue + 1)
|
<commit_before><commit_msg>Create new module for calculation of veg indices; add EVI<commit_after>
|
""" Functions for computing vegetation indices
"""
from __future__ import division
def EVI(red, nir, blue):
""" Return the Enhanced Vegetation Index for a set of np.ndarrays
EVI is calculated as:
.. math::
2.5 * \\frac{(NIR - RED)}{(NIR + C_1 * RED - C_2 * BLUE + L)}
where:
- :math:`RED` is the red band
- :math:`NIR` is the near infrared band
- :math:`BLUE` is the blue band
- :math:`C_1 = 6`
- :math:`C_2 = 7.5`
- :math:`L = 1`
Note: bands must be given in float datatype from [0, 1]
Args:
red (np.ndarray): red band
nir (np.ndarray): NIR band
blue (np.ndarray): blue band
Returns:
np.ndarray: EVI
"""
return 2.5 * (nir - red) / (nir + 6 * red - 7.5 * blue + 1)
|
Create new module for calculation of veg indices; add EVI
""" Functions for computing vegetation indices
"""
from __future__ import division
def EVI(red, nir, blue):
""" Return the Enhanced Vegetation Index for a set of np.ndarrays
EVI is calculated as:
.. math::
2.5 * \\frac{(NIR - RED)}{(NIR + C_1 * RED - C_2 * BLUE + L)}
where:
- :math:`RED` is the red band
- :math:`NIR` is the near infrared band
- :math:`BLUE` is the blue band
- :math:`C_1 = 6`
- :math:`C_2 = 7.5`
- :math:`L = 1`
Note: bands must be given in float datatype from [0, 1]
Args:
red (np.ndarray): red band
nir (np.ndarray): NIR band
blue (np.ndarray): blue band
Returns:
np.ndarray: EVI
"""
return 2.5 * (nir - red) / (nir + 6 * red - 7.5 * blue + 1)
|
<commit_before><commit_msg>Create new module for calculation of veg indices; add EVI<commit_after>""" Functions for computing vegetation indices
"""
from __future__ import division
def EVI(red, nir, blue):
""" Return the Enhanced Vegetation Index for a set of np.ndarrays
EVI is calculated as:
.. math::
2.5 * \\frac{(NIR - RED)}{(NIR + C_1 * RED - C_2 * BLUE + L)}
where:
- :math:`RED` is the red band
- :math:`NIR` is the near infrared band
- :math:`BLUE` is the blue band
- :math:`C_1 = 6`
- :math:`C_2 = 7.5`
- :math:`L = 1`
Note: bands must be given in float datatype from [0, 1]
Args:
red (np.ndarray): red band
nir (np.ndarray): NIR band
blue (np.ndarray): blue band
Returns:
np.ndarray: EVI
"""
return 2.5 * (nir - red) / (nir + 6 * red - 7.5 * blue + 1)
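A hedged usage sketch; the reflectance values are invented and already scaled to [0, 1] as the docstring requires:
import numpy as np
red  = np.array([0.08, 0.10])
nir  = np.array([0.40, 0.45])
blue = np.array([0.05, 0.06])
print(EVI(red, nir, blue))  # per-pixel EVI values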
|
|
3eebd3fc158cfc8a20a6031aca012f87b5aa0ce5
|
modoboa/admin/migrations/0018_update_disabled_accounts_aliases.py
|
modoboa/admin/migrations/0018_update_disabled_accounts_aliases.py
|
# Generated by Django 2.2.17 on 2021-01-28 13:40
from django.db import migrations
def update_disabled_accounts_aliases(apps, schema_editor):
User = apps.get_model('core', 'User')
Alias = apps.get_model('admin', 'Alias')
disabled_accounts = User.objects.filter(
mailbox__isnull=False, is_active=False)
Alias.objects.filter(
address__in=list(disabled_accounts.values_list('email', flat=True)),
enabled=True,
).update(enabled=False)
class Migration(migrations.Migration):
dependencies = [
('admin', '0017_alarm'),
('core', '0022_user_tfa_enabled'),
]
operations = [
migrations.RunPython(update_disabled_accounts_aliases),
]
|
Add migration to disable disabled accounts' aliases
|
Add migration to disable disabled accounts' aliases
|
Python
|
isc
|
bearstech/modoboa,modoboa/modoboa,bearstech/modoboa,modoboa/modoboa,modoboa/modoboa,modoboa/modoboa,bearstech/modoboa,bearstech/modoboa
|
Add migration to disable disabled accounts' aliases
|
# Generated by Django 2.2.17 on 2021-01-28 13:40
from django.db import migrations
def update_disabled_accounts_aliases(apps, schema_editor):
User = apps.get_model('core', 'User')
Alias = apps.get_model('admin', 'Alias')
disabled_accounts = User.objects.filter(
mailbox__isnull=False, is_active=False)
Alias.objects.filter(
address__in=list(disabled_accounts.values_list('email', flat=True)),
enabled=True,
).update(enabled=False)
class Migration(migrations.Migration):
dependencies = [
('admin', '0017_alarm'),
('core', '0022_user_tfa_enabled'),
]
operations = [
migrations.RunPython(update_disabled_accounts_aliases),
]
|
<commit_before><commit_msg>Add migration to disable disabled accounts' aliases<commit_after>
|
# Generated by Django 2.2.17 on 2021-01-28 13:40
from django.db import migrations
def update_disabled_accounts_aliases(apps, schema_editor):
User = apps.get_model('core', 'User')
Alias = apps.get_model('admin', 'Alias')
disabled_accounts = User.objects.filter(
mailbox__isnull=False, is_active=False)
Alias.objects.filter(
address__in=list(disabled_accounts.values_list('email', flat=True)),
enabled=True,
).update(enabled=False)
class Migration(migrations.Migration):
dependencies = [
('admin', '0017_alarm'),
('core', '0022_user_tfa_enabled'),
]
operations = [
migrations.RunPython(update_disabled_accounts_aliases),
]
|
Add migration to disable disabled accounts' aliases
# Generated by Django 2.2.17 on 2021-01-28 13:40
from django.db import migrations
def update_disabled_accounts_aliases(apps, schema_editor):
User = apps.get_model('core', 'User')
Alias = apps.get_model('admin', 'Alias')
disabled_accounts = User.objects.filter(
mailbox__isnull=False, is_active=False)
Alias.objects.filter(
address__in=list(disabled_accounts.values_list('email', flat=True)),
enabled=True,
).update(enabled=False)
class Migration(migrations.Migration):
dependencies = [
('admin', '0017_alarm'),
('core', '0022_user_tfa_enabled'),
]
operations = [
migrations.RunPython(update_disabled_accounts_aliases),
]
|
<commit_before><commit_msg>Add migration to disable disabled accounts' aliases<commit_after># Generated by Django 2.2.17 on 2021-01-28 13:40
from django.db import migrations
def update_disabled_accounts_aliases(apps, schema_editor):
User = apps.get_model('core', 'User')
Alias = apps.get_model('admin', 'Alias')
disabled_accounts = User.objects.filter(
mailbox__isnull=False, is_active=False)
Alias.objects.filter(
address__in=list(disabled_accounts.values_list('email', flat=True)),
enabled=True,
).update(enabled=False)
class Migration(migrations.Migration):
dependencies = [
('admin', '0017_alarm'),
('core', '0022_user_tfa_enabled'),
]
operations = [
migrations.RunPython(update_disabled_accounts_aliases),
]
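A hedged note on applying it: like any Django data migration it is picked up by the normal migrate step; the sketch below assumes a configured modoboa settings module:
from django.core.management import call_command
call_command('migrate', 'admin')  # runs 0018_update_disabled_accounts_aliases along with any other pending admin migrations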
|
|
04b7d0ad075fcc35049f3a448bc8aee61804b7a4
|
check_applied.py
|
check_applied.py
|
#!/usr/bin/python
import mysql.connector
from mysql.connector import errorcode
import re
import os
import string
import sys
import subprocess
import auto_merge
def check_if_applied(cnx, cursor, commit):
subprocess.check_output(['git', 'reset', '--hard'])
update_patch = ("UPDATE patch SET status=%s WHERE btc_commit_id=%s")
try:
subprocess.check_output(['git', 'cherry-pick', commit, '--no-commit', '--strategy=recursive', '--strategy-option=theirs'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
print "Marking commit " + commit + " as collision"
cursor.execute(update_patch, ('Collision', commit))
cnx.commit()
return
diff = subprocess.check_output(['git', 'diff', 'HEAD'])
if (len(diff) == 0):
print "Marking commit " + commit + " as applied"
cursor.execute(update_patch, ('Applied', commit))
cnx.commit()
else:
cursor.execute(update_patch, ('Available', commit))
cnx.commit()
return
commits = []
query = ("SELECT btc_commit_id FROM patch WHERE status IS NULL ORDER BY batch, batch_sequence")
try:
cnx = auto_merge.get_connection('config.yml')
try:
cursor = cnx.cursor()
try:
cursor.execute(query)
# Copy out the list before we start modifying the database
for (commit) in cursor:
commits.append(commit[0])
print commits
cwd = os.getcwd()
os.chdir('dogecoin')
for commit in commits:
check_if_applied(cnx, cursor, commit)
os.chdir(cwd)
finally:
cursor.close()
finally:
cnx.close()
except mysql.connector.Error as err:
print err
sys.exit(1)
|
Add script to test if patches have been applied
|
Add script to test if patches have been applied
|
Python
|
mit
|
rnicoll/robodoge
|
Add script to test if patches have been applied
|
#!/usr/bin/python
import mysql.connector
from mysql.connector import errorcode
import re
import os
import string
import sys
import subprocess
import auto_merge
def check_if_applied(cnx, cursor, commit):
subprocess.check_output(['git', 'reset', '--hard'])
update_patch = ("UPDATE patch SET status=%s WHERE btc_commit_id=%s")
try:
subprocess.check_output(['git', 'cherry-pick', commit, '--no-commit', '--strategy=recursive', '--strategy-option=theirs'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
print "Marking commit " + commit + " as collision"
cursor.execute(update_patch, ('Collision', commit))
cnx.commit()
return
diff = subprocess.check_output(['git', 'diff', 'HEAD'])
if (len(diff) == 0):
print "Marking commit " + commit + " as applied"
cursor.execute(update_patch, ('Applied', commit))
cnx.commit()
else:
cursor.execute(update_patch, ('Available', commit))
cnx.commit()
return
commits = []
query = ("SELECT btc_commit_id FROM patch WHERE status IS NULL ORDER BY batch, batch_sequence")
try:
cnx = auto_merge.get_connection('config.yml')
try:
cursor = cnx.cursor()
try:
cursor.execute(query)
# Copy out the list before we start modifying the database
for (commit) in cursor:
commits.append(commit[0])
print commits
cwd = os.getcwd()
os.chdir('dogecoin')
for commit in commits:
check_if_applied(cnx, cursor, commit)
os.chdir(cwd)
finally:
cursor.close()
finally:
cnx.close()
except mysql.connector.Error as err:
print err
sys.exit(1)
|
<commit_before><commit_msg>Add script to test if patches have been applied<commit_after>
|
#!/usr/bin/python
import mysql.connector
from mysql.connector import errorcode
import re
import os
import string
import sys
import subprocess
import auto_merge
def check_if_applied(cnx, cursor, commit):
subprocess.check_output(['git', 'reset', '--hard'])
update_patch = ("UPDATE patch SET status=%s WHERE btc_commit_id=%s")
try:
subprocess.check_output(['git', 'cherry-pick', commit, '--no-commit', '--strategy=recursive', '--strategy-option=theirs'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
print "Marking commit " + commit + " as collision"
cursor.execute(update_patch, ('Collision', commit))
cnx.commit()
return
diff = subprocess.check_output(['git', 'diff', 'HEAD'])
if (len(diff) == 0):
print "Marking commit " + commit + " as applied"
cursor.execute(update_patch, ('Applied', commit))
cnx.commit()
else:
cursor.execute(update_patch, ('Available', commit))
cnx.commit()
return
commits = []
query = ("SELECT btc_commit_id FROM patch WHERE status IS NULL ORDER BY batch, batch_sequence")
try:
cnx = auto_merge.get_connection('config.yml')
try:
cursor = cnx.cursor()
try:
cursor.execute(query)
# Copy out the list before we start modifying the database
for (commit) in cursor:
commits.append(commit[0])
print commits
cwd = os.getcwd()
os.chdir('dogecoin')
for commit in commits:
check_if_applied(cnx, cursor, commit)
os.chdir(cwd)
finally:
cursor.close()
finally:
cnx.close()
except mysql.connector.Error as err:
print err
sys.exit(1)
|
Add script to test if patches have been applied
#!/usr/bin/python
import mysql.connector
from mysql.connector import errorcode
import re
import os
import string
import sys
import subprocess
import auto_merge
def check_if_applied(cnx, cursor, commit):
subprocess.check_output(['git', 'reset', '--hard'])
update_patch = ("UPDATE patch SET status=%s WHERE btc_commit_id=%s")
try:
subprocess.check_output(['git', 'cherry-pick', commit, '--no-commit', '--strategy=recursive', '--strategy-option=theirs'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
print "Marking commit " + commit + " as collision"
cursor.execute(update_patch, ('Collision', commit))
cnx.commit()
return
diff = subprocess.check_output(['git', 'diff', 'HEAD'])
if (len(diff) == 0):
print "Marking commit " + commit + " as applied"
cursor.execute(update_patch, ('Applied', commit))
cnx.commit()
else:
cursor.execute(update_patch, ('Available', commit))
cnx.commit()
return
commits = []
query = ("SELECT btc_commit_id FROM patch WHERE status IS NULL ORDER BY batch, batch_sequence")
try:
cnx = auto_merge.get_connection('config.yml')
try:
cursor = cnx.cursor()
try:
cursor.execute(query)
# Copy out the list before we start modifying the database
for (commit) in cursor:
commits.append(commit[0])
print commits
cwd = os.getcwd()
os.chdir('dogecoin')
for commit in commits:
check_if_applied(cnx, cursor, commit)
os.chdir(cwd)
finally:
cursor.close()
finally:
cnx.close()
except mysql.connector.Error as err:
print err
sys.exit(1)
|
<commit_before><commit_msg>Add script to test if patches have been applied<commit_after>#!/usr/bin/python
import mysql.connector
from mysql.connector import errorcode
import re
import os
import string
import sys
import subprocess
import auto_merge
def check_if_applied(cnx, cursor, commit):
subprocess.check_output(['git', 'reset', '--hard'])
update_patch = ("UPDATE patch SET status=%s WHERE btc_commit_id=%s")
try:
subprocess.check_output(['git', 'cherry-pick', commit, '--no-commit', '--strategy=recursive', '--strategy-option=theirs'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
print "Marking commit " + commit + " as collision"
cursor.execute(update_patch, ('Collision', commit))
cnx.commit()
return
diff = subprocess.check_output(['git', 'diff', 'HEAD'])
if (len(diff) == 0):
print "Marking commit " + commit + " as applied"
cursor.execute(update_patch, ('Applied', commit))
cnx.commit()
else:
cursor.execute(update_patch, ('Available', commit))
cnx.commit()
return
commits = []
query = ("SELECT btc_commit_id FROM patch WHERE status IS NULL ORDER BY batch, batch_sequence")
try:
cnx = auto_merge.get_connection('config.yml')
try:
cursor = cnx.cursor()
try:
cursor.execute(query)
# Copy out the list before we start modifying the database
for (commit) in cursor:
commits.append(commit[0])
print commits
cwd = os.getcwd()
os.chdir('dogecoin')
for commit in commits:
check_if_applied(cnx, cursor, commit)
os.chdir(cwd)
finally:
cursor.close()
finally:
cnx.close()
except mysql.connector.Error as err:
print err
sys.exit(1)
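A hedged usage note: the script expects a config.yml readable by auto_merge.get_connection and a dogecoin clone in the working directory, so a run is simply:
# from the robodoge checkout root containing config.yml and the dogecoin/ clone:
#   ./check_applied.py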
|
|
c61b7b9e7bcc3531ca0f7d4a1e5b9dc074855418
|
src/auspex/instruments/lecroy.py
|
src/auspex/instruments/lecroy.py
|
__all__ = ['HDO6104']
from auspex.log import logger
from .instrument import SCPIInstrument, StringCommand, FloatCommand, IntCommand, Command
import numpy as np
import time
class HDO6104(SCPIInstrument):
channel_enabled = Command(scpi_string="C{channel}:TRA",
additional_args=["channel"],value_map={True:"ON",False:"OFF"})
sample_points = IntCommand(scpi_string="MEMORY_SIZE")
def connect(self, resource_name=None, interface_type=None):
super(HDO6104,self).connect(resource_name=resource_name,interface_type=interface_type)
self.interface.write("COMM_HEADER OFF")
self.interface._resource.read_termination = u"\n"
def get_info(self,channel=1):
raw_info = self.interface.query("C%d:INSPECT? WAVEDESC" %channel).split("\r\n")[1:-1]
info = [item.split(':') for item in raw_info]
return {k[0].strip(): k[1].strip() for k in info}
def fetch_waveform(self,channel):
# Send the MSB first
self.interface.write("COMM_ORDER HI")
self.interface.write("COMM_FORMAT DEF9,WORD,BIN")
mydict = self.get_info(channel=channel)
points = int(mydict["PNTS_PER_SCREEN"])
xincrement = float(mydict["HORIZ_INTERVAL"])
xorigin = float(mydict["HORIZ_OFFSET"])
yincrement = float(mydict["VERTICAL_GAIN"])
yorigin = float(mydict["VERTICAL_OFFSET"])
# Read waveform data
y_axis = np.array(self.interface.query_binary_values('C%d:WAVEFORM? DAT1' % channel, datatype='h', is_big_endian=True))
y_axis = y_axis*yincrement - yorigin
x_axis = xorigin + np.arange(0, xincrement*len(y_axis), xincrement)
return x_axis, y_axis
|
Add Lecroy HDO6104 instrument class
|
Add Lecroy HDO6104 instrument class
|
Python
|
apache-2.0
|
BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex
|
Add Lecroy HDO6104 instrument class
|
__all__ = ['HDO6104']
from auspex.log import logger
from .instrument import SCPIInstrument, StringCommand, FloatCommand, IntCommand, Command
import numpy as np
import time
class HDO6104(SCPIInstrument):
channel_enabled = Command(scpi_string="C{channel}:TRA",
additional_args=["channel"],value_map={True:"ON",False:"OFF"})
sample_points = IntCommand(scpi_string="MEMORY_SIZE")
def connect(self, resource_name=None, interface_type=None):
super(HDO6104,self).connect(resource_name=resource_name,interface_type=interface_type)
self.interface.write("COMM_HEADER OFF")
self.interface._resource.read_termination = u"\n"
def get_info(self,channel=1):
raw_info = self.interface.query("C%d:INSPECT? WAVEDESC" %channel).split("\r\n")[1:-1]
info = [item.split(':') for item in raw_info]
return {k[0].strip(): k[1].strip() for k in info}
def fetch_waveform(self,channel):
# Send the MSB first
self.interface.write("COMM_ORDER HI")
self.interface.write("COMM_FORMAT DEF9,WORD,BIN")
mydict = self.get_info(channel=channel)
points = int(mydict["PNTS_PER_SCREEN"])
xincrement = float(mydict["HORIZ_INTERVAL"])
xorigin = float(mydict["HORIZ_OFFSET"])
yincrement = float(mydict["VERTICAL_GAIN"])
yorigin = float(mydict["VERTICAL_OFFSET"])
# Read waveform data
y_axis = np.array(self.interface.query_binary_values('C%d:WAVEFORM? DAT1' % channel, datatype='h', is_big_endian=True))
y_axis = y_axis*yincrement - yorigin
x_axis = xorigin + np.arange(0, xincrement*len(y_axis), xincrement)
return x_axis, y_axis
|
<commit_before><commit_msg>Add Lecroy HDO6104 instrument class<commit_after>
|
__all__ = ['HDO6104']
from auspex.log import logger
from .instrument import SCPIInstrument, StringCommand, FloatCommand, IntCommand, Command
import numpy as np
import time
class HDO6104(SCPIInstrument):
channel_enabled = Command(scpi_string="C{channel}:TRA",
additional_args=["channel"],value_map={True:"ON",False:"OFF"})
sample_points = IntCommand(scpi_string="MEMORY_SIZE")
def connect(self, resource_name=None, interface_type=None):
super(HDO6104,self).connect(resource_name=resource_name,interface_type=interface_type)
self.interface.write("COMM_HEADER OFF")
self.interface._resource.read_termination = u"\n"
def get_info(self,channel=1):
raw_info = self.interface.query("C%d:INSPECT? WAVEDESC" %channel).split("\r\n")[1:-1]
info = [item.split(':') for item in raw_info]
return {k[0].strip(): k[1].strip() for k in info}
def fetch_waveform(self,channel):
# Send the MSB first
self.interface.write("COMM_ORDER HI")
self.interface.write("COMM_FORMAT DEF9,WORD,BIN")
mydict = self.get_info(channel=channel)
points = int(mydict["PNTS_PER_SCREEN"])
xincrement = float(mydict["HORIZ_INTERVAL"])
xorigin = float(mydict["HORIZ_OFFSET"])
yincrement = float(mydict["VERTICAL_GAIN"])
yorigin = float(mydict["VERTICAL_OFFSET"])
# Read waveform data
y_axis = np.array(self.interface.query_binary_values('C%d:WAVEFORM? DAT1' % channel, datatype='h', is_big_endian=True))
y_axis = y_axis*yincrement - yorigin
x_axis = xorigin + np.arange(0, xincrement*len(y_axis), xincrement)
return x_axis, y_axis
|
Add Lecroy HDO6104 instrument class
__all__ = ['HDO6104']
from auspex.log import logger
from .instrument import SCPIInstrument, StringCommand, FloatCommand, IntCommand, Command
import numpy as np
import time
class HDO6104(SCPIInstrument):
channel_enabled = Command(scpi_string="C{channel}:TRA",
additional_args=["channel"],value_map={True:"ON",False:"OFF"})
sample_points = IntCommand(scpi_string="MEMORY_SIZE")
def connect(self, resource_name=None, interface_type=None):
super(HDO6104,self).connect(resource_name=resource_name,interface_type=interface_type)
self.interface.write("COMM_HEADER OFF")
self.interface._resource.read_termination = u"\n"
def get_info(self,channel=1):
raw_info = self.interface.query("C%d:INSPECT? WAVEDESC" %channel).split("\r\n")[1:-1]
info = [item.split(':') for item in raw_info]
return {k[0].strip(): k[1].strip() for k in info}
def fetch_waveform(self,channel):
# Send the MSB first
self.interface.write("COMM_ORDER HI")
self.interface.write("COMM_FORMAT DEF9,WORD,BIN")
mydict = self.get_info(channel=channel)
points = int(mydict["PNTS_PER_SCREEN"])
xincrement = float(mydict["HORIZ_INTERVAL"])
xorigin = float(mydict["HORIZ_OFFSET"])
yincrement = float(mydict["VERTICAL_GAIN"])
yorigin = float(mydict["VERTICAL_OFFSET"])
# Read waveform data
y_axis = np.array(self.interface.query_binary_values('C%d:WAVEFORM? DAT1' % channel, datatype='h', is_big_endian=True))
y_axis = y_axis*yincrement - yorigin
x_axis = xorigin + np.arange(0, xincrement*len(y_axis), xincrement)
return x_axis, y_axis
|
<commit_before><commit_msg>Add Lecroy HDO6104 instrument class<commit_after>__all__ = ['HDO6104']
from auspex.log import logger
from .instrument import SCPIInstrument, StringCommand, FloatCommand, IntCommand, Command
import numpy as np
import time
class HDO6104(SCPIInstrument):
channel_enabled = Command(scpi_string="C{channel}:TRA",
additional_args=["channel"],value_map={True:"ON",False:"OFF"})
sample_points = IntCommand(scpi_string="MEMORY_SIZE")
def connect(self, resource_name=None, interface_type=None):
super(HDO6104,self).connect(resource_name=resource_name,interface_type=interface_type)
self.interface.write("COMM_HEADER OFF")
self.interface._resource.read_termination = u"\n"
def get_info(self,channel=1):
raw_info = self.interface.query("C%d:INSPECT? WAVEDESC" %channel).split("\r\n")[1:-1]
info = [item.split(':') for item in raw_info]
return {k[0].strip(): k[1].strip() for k in info}
def fetch_waveform(self,channel):
# Send the MSB first
self.interface.write("COMM_ORDER HI")
self.interface.write("COMM_FORMAT DEF9,WORD,BIN")
mydict = self.get_info(channel=channel)
points = int(mydict["PNTS_PER_SCREEN"])
xincrement = float(mydict["HORIZ_INTERVAL"])
xorigin = float(mydict["HORIZ_OFFSET"])
yincrement = float(mydict["VERTICAL_GAIN"])
yorigin = float(mydict["VERTICAL_OFFSET"])
# Read waveform data
y_axis = np.array(self.interface.query_binary_values('C%d:WAVEFORM? DAT1' % channel, datatype='h', is_big_endian=True))
y_axis = y_axis*yincrement - yorigin
x_axis = xorigin + np.arange(0, xincrement*len(y_axis), xincrement)
return x_axis, y_axis
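A hedged usage sketch; the constructor arguments follow the usual auspex SCPIInstrument pattern and are assumed here, and the VISA resource string is invented:
scope = HDO6104()  # constructor arguments depend on SCPIInstrument and are assumed
scope.connect(resource_name="TCPIP0::192.168.1.50::inst0::INSTR")  # hypothetical address
times, volts = scope.fetch_waveform(1)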
|
|
c856a4e6f0893d66728b48e4e937adac0f7e5e97
|
hil/migrations/versions/76529f0f9e50_add_uuid_and_status_to_networkingaction.py
|
hil/migrations/versions/76529f0f9e50_add_uuid_and_status_to_networkingaction.py
|
"""add uuid and status to NetworkingAction
Revision ID: 76529f0f9e50
Revises: 9089fa811a2b
Create Date: 2018-01-07 15:24:09.545021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76529f0f9e50'
down_revision = '9089fa811a2b'
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('networking_action', sa.Column('status', sa.String(),
nullable=True))
op.add_column('networking_action', sa.Column('uuid', sa.String(),
nullable=True))
op.create_index(op.f('ix_networking_action_uuid'), 'networking_action',
['uuid'], unique=False)
op.execute("UPDATE networking_action SET status = 'PENDING', \
uuid ='no-uuid'")
op.alter_column('networking_action', 'status', nullable=False)
op.alter_column('networking_action', 'uuid', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_networking_action_uuid'),
table_name='networking_action')
op.drop_column('networking_action', 'uuid')
op.drop_column('networking_action', 'status')
# ### end Alembic commands ###
|
Add a migration script for networking_action to add the new columns
|
Add a migration script for networking_action to add the new columns
If there are entries in the networking action table that we are upgrading, that
means those are pending actions; so the script sets their status to PENDING.
And since when those actions were queued, the users didn't have a uuid to poll
on anyway, it just puts some string as uuids.
|
Python
|
apache-2.0
|
SahilTikale/haas,CCI-MOC/haas
|
Add a migration script for networking_action to add the new columns
If there are entries in the networking action table that we are upgrading, that
means those are pending actions; so the script sets their status to PENDING.
And since when those actions were queued, the users didn't have a uuid to poll
on anyway, it just puts some string as uuids.
|
"""add uuid and status to NetworkingAction
Revision ID: 76529f0f9e50
Revises: 9089fa811a2b
Create Date: 2018-01-07 15:24:09.545021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76529f0f9e50'
down_revision = '9089fa811a2b'
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('networking_action', sa.Column('status', sa.String(),
nullable=True))
op.add_column('networking_action', sa.Column('uuid', sa.String(),
nullable=True))
op.create_index(op.f('ix_networking_action_uuid'), 'networking_action',
['uuid'], unique=False)
op.execute("UPDATE networking_action SET status = 'PENDING', \
uuid ='no-uuid'")
op.alter_column('networking_action', 'status', nullable=False)
op.alter_column('networking_action', 'uuid', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_networking_action_uuid'),
table_name='networking_action')
op.drop_column('networking_action', 'uuid')
op.drop_column('networking_action', 'status')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add a migration script for networking_action to add the new columns
If there are entries in the networking action table that we are upgrading, that
means those are pending actions; so the script sets their status to PENDING.
And since when those actions were queued, the users didn't have a uuid to poll
on anyway, it just puts some string as uuids.<commit_after>
|
"""add uuid and status to NetworkingAction
Revision ID: 76529f0f9e50
Revises: 9089fa811a2b
Create Date: 2018-01-07 15:24:09.545021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76529f0f9e50'
down_revision = '9089fa811a2b'
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('networking_action', sa.Column('status', sa.String(),
nullable=True))
op.add_column('networking_action', sa.Column('uuid', sa.String(),
nullable=True))
op.create_index(op.f('ix_networking_action_uuid'), 'networking_action',
['uuid'], unique=False)
op.execute("UPDATE networking_action SET status = 'PENDING', \
uuid ='no-uuid'")
op.alter_column('networking_action', 'status', nullable=False)
op.alter_column('networking_action', 'uuid', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_networking_action_uuid'),
table_name='networking_action')
op.drop_column('networking_action', 'uuid')
op.drop_column('networking_action', 'status')
# ### end Alembic commands ###
|
Add a migration script for networking_action to add the new columns
If there are entries in the networking action table that we are upgrading, that
means those are pending actions; so the script sets their status to PENDING.
And since when those actions were queued, the users didn't have a uuid to poll
on anyway, it just puts some string as uuids."""add uuid and status to NetworkingAction
Revision ID: 76529f0f9e50
Revises: 9089fa811a2b
Create Date: 2018-01-07 15:24:09.545021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76529f0f9e50'
down_revision = '9089fa811a2b'
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('networking_action', sa.Column('status', sa.String(),
nullable=True))
op.add_column('networking_action', sa.Column('uuid', sa.String(),
nullable=True))
op.create_index(op.f('ix_networking_action_uuid'), 'networking_action',
['uuid'], unique=False)
op.execute("UPDATE networking_action SET status = 'PENDING', \
uuid ='no-uuid'")
op.alter_column('networking_action', 'status', nullable=False)
op.alter_column('networking_action', 'uuid', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_networking_action_uuid'),
table_name='networking_action')
op.drop_column('networking_action', 'uuid')
op.drop_column('networking_action', 'status')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add a migration script for networking_action to add the new columns
If there are entries in the networking action table that we are upgrading, that
means those are pending actions; so the script sets their status to PENDING.
And since when those actions were queued, the users didn't have a uuid to poll
on anyway, it just puts some string as uuids.<commit_after>"""add uuid and status to NetworkingAction
Revision ID: 76529f0f9e50
Revises: 9089fa811a2b
Create Date: 2018-01-07 15:24:09.545021
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76529f0f9e50'
down_revision = '9089fa811a2b'
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('networking_action', sa.Column('status', sa.String(),
nullable=True))
op.add_column('networking_action', sa.Column('uuid', sa.String(),
nullable=True))
op.create_index(op.f('ix_networking_action_uuid'), 'networking_action',
['uuid'], unique=False)
op.execute("UPDATE networking_action SET status = 'PENDING', \
uuid ='no-uuid'")
op.alter_column('networking_action', 'status', nullable=False)
op.alter_column('networking_action', 'uuid', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_networking_action_uuid'),
table_name='networking_action')
op.drop_column('networking_action', 'uuid')
op.drop_column('networking_action', 'status')
# ### end Alembic commands ###
|
|
bc367b856af42cbc30bb32d710ed115ede59ef3d
|
py/valid-parenthesis-string.py
|
py/valid-parenthesis-string.py
|
class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
lowest, highest = 0, 0
for c in s:
if c == '(':
lowest += 1
highest += 1
elif c == ')':
if lowest > 0:
lowest -= 1
highest -= 1
if highest < 0:
return False
else:
if lowest > 0:
lowest -= 1
highest += 1
return lowest == 0
|
Add py solution for 678. Valid Parenthesis String
|
Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/
|
class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
lowest, highest = 0, 0
for c in s:
if c == '(':
lowest += 1
highest += 1
elif c == ')':
if lowest > 0:
lowest -= 1
highest -= 1
if highest < 0:
return False
else:
if lowest > 0:
lowest -= 1
highest += 1
return lowest == 0
|
<commit_before><commit_msg>Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/<commit_after>
|
class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
lowest, highest = 0, 0
for c in s:
if c == '(':
lowest += 1
highest += 1
elif c == ')':
if lowest > 0:
lowest -= 1
highest -= 1
if highest < 0:
return False
else:
if lowest > 0:
lowest -= 1
highest += 1
return lowest == 0
|
Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
lowest, highest = 0, 0
for c in s:
if c == '(':
lowest += 1
highest += 1
elif c == ')':
if lowest > 0:
lowest -= 1
highest -= 1
if highest < 0:
return False
else:
if lowest > 0:
lowest -= 1
highest += 1
return lowest == 0
|
<commit_before><commit_msg>Add py solution for 678. Valid Parenthesis String
678. Valid Parenthesis String: https://leetcode.com/problems/valid-parenthesis-string/<commit_after>class Solution(object):
def checkValidString(self, s):
"""
:type s: str
:rtype: bool
"""
lowest, highest = 0, 0
for c in s:
if c == '(':
lowest += 1
highest += 1
elif c == ')':
if lowest > 0:
lowest -= 1
highest -= 1
if highest < 0:
return False
else:
if lowest > 0:
lowest -= 1
highest += 1
return lowest == 0
|
|
6b25d372d3f20cae5f1095f508cc0c4ba71de972
|
py/max-area-of-island.py
|
py/max-area-of-island.py
|
def link(info, size, r1, r2):
if size[r1] > size[r2]:
info[r2] = r1
size[r1] += size[r2]
return size[r1]
else:
info[r1] = r2
size[r2] += size[r1]
return size[r2]
def find_root(info, p1):
if p1 != info[p1]:
info[p1] = find_root(info, info[p1])
return info[p1]
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
info = dict()
size = dict()
rows = len(grid)
cols = len(grid[0])
m = 0
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
info[r, c] = (r, c)
size[r, c] = 1
m = 1
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
if r > 0 and grid[r - 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r - 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if r < rows - 1 and grid[r + 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r + 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c > 0 and grid[r][c - 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c - 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c < cols - 1 and grid[r][c + 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c + 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
return m
|
Add py solution for 695. Max Area of Island
|
Add py solution for 695. Max Area of Island
695. Max Area of Island: https://leetcode.com/problems/max-area-of-island/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 695. Max Area of Island
695. Max Area of Island: https://leetcode.com/problems/max-area-of-island/
|
def link(info, size, r1, r2):
if size[r1] > size[r2]:
info[r2] = r1
size[r1] += size[r2]
return size[r1]
else:
info[r1] = r2
size[r2] += size[r1]
return size[r2]
def find_root(info, p1):
if p1 != info[p1]:
info[p1] = find_root(info, info[p1])
return info[p1]
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
info = dict()
size = dict()
rows = len(grid)
cols = len(grid[0])
m = 0
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
info[r, c] = (r, c)
size[r, c] = 1
m = 1
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
if r > 0 and grid[r - 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r - 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if r < rows - 1 and grid[r + 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r + 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c > 0 and grid[r][c - 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c - 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c < cols - 1 and grid[r][c + 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c + 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
return m
|
<commit_before><commit_msg>Add py solution for 695. Max Area of Island
695. Max Area of Island: https://leetcode.com/problems/max-area-of-island/<commit_after>
|
def link(info, size, r1, r2):
if size[r1] > size[r2]:
info[r2] = r1
size[r1] += size[r2]
return size[r1]
else:
info[r1] = r2
size[r2] += size[r1]
return size[r2]
def find_root(info, p1):
if p1 != info[p1]:
info[p1] = find_root(info, info[p1])
return info[p1]
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
info = dict()
size = dict()
rows = len(grid)
cols = len(grid[0])
m = 0
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
info[r, c] = (r, c)
size[r, c] = 1
m = 1
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
if r > 0 and grid[r - 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r - 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if r < rows - 1 and grid[r + 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r + 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c > 0 and grid[r][c - 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c - 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c < cols - 1 and grid[r][c + 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c + 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
return m
|
Add py solution for 695. Max Area of Island
695. Max Area of Island: https://leetcode.com/problems/max-area-of-island/def link(info, size, r1, r2):
if size[r1] > size[r2]:
info[r2] = r1
size[r1] += size[r2]
return size[r1]
else:
info[r1] = r2
size[r2] += size[r1]
return size[r2]
def find_root(info, p1):
if p1 != info[p1]:
info[p1] = find_root(info, info[p1])
return info[p1]
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
info = dict()
size = dict()
rows = len(grid)
cols = len(grid[0])
m = 0
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
info[r, c] = (r, c)
size[r, c] = 1
m = 1
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
if r > 0 and grid[r - 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r - 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if r < rows - 1 and grid[r + 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r + 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c > 0 and grid[r][c - 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c - 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c < cols - 1 and grid[r][c + 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c + 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
return m
|
<commit_before><commit_msg>Add py solution for 695. Max Area of Island
695. Max Area of Island: https://leetcode.com/problems/max-area-of-island/<commit_after>def link(info, size, r1, r2):
if size[r1] > size[r2]:
info[r2] = r1
size[r1] += size[r2]
return size[r1]
else:
info[r1] = r2
size[r2] += size[r1]
return size[r2]
def find_root(info, p1):
if p1 != info[p1]:
info[p1] = find_root(info, info[p1])
return info[p1]
class Solution(object):
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
info = dict()
size = dict()
rows = len(grid)
cols = len(grid[0])
m = 0
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
info[r, c] = (r, c)
size[r, c] = 1
m = 1
for r, row in enumerate(grid):
for c, v in enumerate(row):
if v == 1:
if r > 0 and grid[r - 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r - 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if r < rows - 1 and grid[r + 1][c] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r + 1, c))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c > 0 and grid[r][c - 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c - 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
if c < cols - 1 and grid[r][c + 1] == 1:
ra, rb = find_root(info, (r, c)), find_root(info, (r, c + 1))
if ra != rb:
m = max(m, link(info, size, ra, rb))
return m
|
|
61d7a137feb873460f4158283034b4d8ae322e3c
|
contrib/update_db.py
|
contrib/update_db.py
|
#!/usr/bin/env python
# coding=utf8
"""update_db.py - A basic migration script for 3.x/4.x databases to 5.0.
Usage: ./update_db.py /path/to/config
Note that it takes the config, rather than the db. Currently, this only
supports text fields, since that's all the stock modules used. It migrates in
place, leaving old tables there, but you should still be sure to back up
everything first to be safe."""
import os
import sqlite3
import sys
import willie
import willie.db
import willie.config
def main():
if willie.__version__.split('.', 1)[0] != '5':
print('Must have Willie 5 installed to run migration script.')
return
if len(sys.argv) != 2:
        print('Usage: ./update_db.py /path/to/config')
        return
config = willie.config.Config(sys.argv[1])
filename = config.db.userdb_file
if not filename:
filename = os.path.splitext(config.filename)[0] + '.db'
elif not config.core.db_filename:
print('Filename is only configured with old setting. Make sure you '
'set the db_filename setting in [core].')
print('Migrating db file {}'.format(filename))
new_db = willie.db.WillieDB(config)
conn = sqlite3.connect(new_db.filename)
cur = conn.cursor()
table_info = cur.execute('PRAGMA table_info(preferences)').fetchall()
for column in table_info:
old_name = column[1]
new_name = old_name if old_name != 'tz' else 'timezone'
if old_name == 'name':
continue
if column[2] != 'text':
msg = "Can't migrate non-text field {}. Please do so manually"
print(msg.format(old_name))
continue
print('Migrating column {}'.format(old_name))
values = cur.execute(
'SELECT name, {} FROM preferences'.format(old_name)).fetchall()
for value in values:
new_db.set_nick_value(value[0], new_name, value[1])
if __name__ == '__main__':
main()
|
Add a script to handle 5.0 db migrations
|
Add a script to handle 5.0 db migrations
|
Python
|
mit
|
Uname-a/knife_scraper,Uname-a/knife_scraper,Uname-a/knife_scraper
|
Add a script to handle 5.0 db migrations
|
#!/usr/bin/env python
# coding=utf8
"""update_db.py - A basic migration script for 3.x/4.x databases to 5.0.
Usage: ./update_db.py /path/to/config
Note that it takes the config, rather than the db. Currently, this only
supports text fields, since that's all the stock modules used. It migrates in
place, leaving old tables there, but you should still be sure to back up
everything first to be safe."""
import os
import sqlite3
import sys
import willie
import willie.db
import willie.config
def main():
if willie.__version__.split('.', 1)[0] != '5':
print('Must have Willie 5 installed to run migration script.')
return
if len(sys.argv) != 2:
        print('Usage: ./update_db.py /path/to/config')
        return
config = willie.config.Config(sys.argv[1])
filename = config.db.userdb_file
if not filename:
filename = os.path.splitext(config.filename)[0] + '.db'
elif not config.core.db_filename:
print('Filename is only configured with old setting. Make sure you '
'set the db_filename setting in [core].')
print('Migrating db file {}'.format(filename))
new_db = willie.db.WillieDB(config)
conn = sqlite3.connect(new_db.filename)
cur = conn.cursor()
table_info = cur.execute('PRAGMA table_info(preferences)').fetchall()
for column in table_info:
old_name = column[1]
new_name = old_name if old_name != 'tz' else 'timezone'
if old_name == 'name':
continue
if column[2] != 'text':
msg = "Can't migrate non-text field {}. Please do so manually"
print(msg.format(old_name))
continue
print('Migrating column {}'.format(old_name))
values = cur.execute(
'SELECT name, {} FROM preferences'.format(old_name)).fetchall()
for value in values:
new_db.set_nick_value(value[0], new_name, value[1])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to handle 5.0 db migrations<commit_after>
|
#!/usr/bin/env python
# coding=utf8
"""update_db.py - A basic migration script for 3.x/4.x databases to 5.0.
Usage: ./update_db.py /path/to/config
Note that it takes the config, rather than the db. Currently, this only
supports text fields, since that's all the stock modules used. It migrates in
place, leaving old tables there, but you should still be sure to back up
everything first to be safe."""
import os
import sqlite3
import sys
import willie
import willie.db
import willie.config
def main():
if willie.__version__.split('.', 1)[0] != '5':
print('Must have Willie 5 installed to run migration script.')
return
if len(sys.argv) != 2:
        print('Usage: ./update_db.py /path/to/config')
        return
config = willie.config.Config(sys.argv[1])
filename = config.db.userdb_file
if not filename:
filename = os.path.splitext(config.filename)[0] + '.db'
elif not config.core.db_filename:
print('Filename is only configured with old setting. Make sure you '
'set the db_filename setting in [core].')
print('Migrating db file {}'.format(filename))
new_db = willie.db.WillieDB(config)
conn = sqlite3.connect(new_db.filename)
cur = conn.cursor()
table_info = cur.execute('PRAGMA table_info(preferences)').fetchall()
for column in table_info:
old_name = column[1]
new_name = old_name if old_name != 'tz' else 'timezone'
if old_name == 'name':
continue
if column[2] != 'text':
msg = "Can't migrate non-text field {}. Please do so manually"
print(msg.format(old_name))
continue
print('Migrating column {}'.format(old_name))
values = cur.execute(
'SELECT name, {} FROM preferences'.format(old_name)).fetchall()
for value in values:
new_db.set_nick_value(value[0], new_name, value[1])
if __name__ == '__main__':
main()
|
Add a script to handle 5.0 db migrations#!/usr/bin/env python
# coding=utf8
"""update_db.py - A basic migration script for 3.x/4.x databases to 5.0.
Usage: ./update_db.py /path/to/config
Note that it takes the config, rather than the db. Currently, this only
supports text fields, since that's all the stock modules used. It migrates in
place, leaving old tables there, but you should still be sure to back up
everything first to be safe."""
import os
import sqlite3
import sys
import willie
import willie.db
import willie.config
def main():
if willie.__version__.split('.', 1)[0] != '5':
print('Must have Willie 5 installed to run migration script.')
return
if len(sys.argv) != 2:
        print('Usage: ./update_db.py /path/to/config')
        return
config = willie.config.Config(sys.argv[1])
filename = config.db.userdb_file
if not filename:
filename = os.path.splitext(config.filename)[0] + '.db'
elif not config.core.db_filename:
print('Filename is only configured with old setting. Make sure you '
'set the db_filename setting in [core].')
print('Migrating db file {}'.format(filename))
new_db = willie.db.WillieDB(config)
conn = sqlite3.connect(new_db.filename)
cur = conn.cursor()
table_info = cur.execute('PRAGMA table_info(preferences)').fetchall()
for column in table_info:
old_name = column[1]
new_name = old_name if old_name != 'tz' else 'timezone'
if old_name == 'name':
continue
if column[2] != 'text':
msg = "Can't migrate non-text field {}. Please do so manually"
print(msg.format(old_name))
continue
print('Migrating column {}'.format(old_name))
values = cur.execute(
'SELECT name, {} FROM preferences'.format(old_name)).fetchall()
for value in values:
new_db.set_nick_value(value[0], new_name, value[1])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to handle 5.0 db migrations<commit_after>#!/usr/bin/env python
# coding=utf8
"""update_db.py - A basic migration script for 3.x/4.x databases to 5.0.
Usage: ./update_db.py /path/to/config
Note that it takes the config, rather than the db. Currently, this only
supports text fields, since that's all the stock modules used. It migrates in
place, leaving old tables there, but you should still be sure to back up
everything first to be safe."""
import os
import sqlite3
import sys
import willie
import willie.db
import willie.config
def main():
if willie.__version__.split('.', 1)[0] != '5':
print('Must have Willie 5 installed to run migration script.')
return
if len(sys.argv) != 2:
        print('Usage: ./update_db.py /path/to/config')
        return
config = willie.config.Config(sys.argv[1])
filename = config.db.userdb_file
if not filename:
filename = os.path.splitext(config.filename)[0] + '.db'
elif not config.core.db_filename:
print('Filename is only configured with old setting. Make sure you '
'set the db_filename setting in [core].')
print('Migrating db file {}'.format(filename))
new_db = willie.db.WillieDB(config)
conn = sqlite3.connect(new_db.filename)
cur = conn.cursor()
table_info = cur.execute('PRAGMA table_info(preferences)').fetchall()
for column in table_info:
old_name = column[1]
new_name = old_name if old_name != 'tz' else 'timezone'
if old_name == 'name':
continue
if column[2] != 'text':
msg = "Can't migrate non-text field {}. Please do so manually"
print(msg.format(old_name))
continue
print('Migrating column {}'.format(old_name))
values = cur.execute(
'SELECT name, {} FROM preferences'.format(old_name)).fetchall()
for value in values:
new_db.set_nick_value(value[0], new_name, value[1])
if __name__ == '__main__':
main()
|
|
fe726fc182c9422856f2607842052df71e26acee
|
colour/examples/difference/examples_delta_e_luo2006.py
|
colour/examples/difference/examples_delta_e_luo2006.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases *Delta E* colour difference computation objects based on
*Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, and *CAM02-UCS* colourspaces.
"""
import colour
from colour.utilities.verbose import message_box
message_box('"Delta E - Luo et al. (2006)" Computations')
Jpapbp_1 = (54.90433134, -0.08450395, -0.06854831)
Jpapbp_2 = (54.90433134, -0.08442362, -0.06848314)
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-LCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-SCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-UCS" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2))
|
Add “Luo et al. (2006)” colour difference examples.
|
Add “Luo et al. (2006)” colour difference examples.
|
Python
|
bsd-3-clause
|
colour-science/colour
|
Add “Luo et al. (2006)” colour difference examples.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases *Delta E* colour difference computation objects based on
*Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, and *CAM02-UCS* colourspaces.
"""
import colour
from colour.utilities.verbose import message_box
message_box('"Delta E - Luo et al. (2006)" Computations')
Jpapbp_1 = (54.90433134, -0.08450395, -0.06854831)
Jpapbp_2 = (54.90433134, -0.08442362, -0.06848314)
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-LCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-SCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-UCS" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2))
|
<commit_before><commit_msg>Add “Luo et al. (2006)” colour difference examples.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases *Delta E* colour difference computation objects based on
*Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, and *CAM02-UCS* colourspaces.
"""
import colour
from colour.utilities.verbose import message_box
message_box('"Delta E - Luo et al. (2006)" Computations')
Jpapbp_1 = (54.90433134, -0.08450395, -0.06854831)
Jpapbp_2 = (54.90433134, -0.08442362, -0.06848314)
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-LCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-SCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-UCS" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2))
|
Add “Luo et al. (2006)” colour difference examples.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases *Delta E* colour difference computation objects based on
*Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, and *CAM02-UCS* colourspaces.
"""
import colour
from colour.utilities.verbose import message_box
message_box('"Delta E - Luo et al. (2006)" Computations')
Jpapbp_1 = (54.90433134, -0.08450395, -0.06854831)
Jpapbp_2 = (54.90433134, -0.08442362, -0.06848314)
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-LCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-SCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-UCS" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2))
|
<commit_before><commit_msg>Add “Luo et al. (2006)” colour difference examples.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Showcases *Delta E* colour difference computation objects based on
*Luo et al. (2006)* *CAM02-LCD*, *CAM02-SCD*, and *CAM02-UCS* colourspaces.
"""
import colour
from colour.utilities.verbose import message_box
message_box('"Delta E - Luo et al. (2006)" Computations')
Jpapbp_1 = (54.90433134, -0.08450395, -0.06854831)
Jpapbp_2 = (54.90433134, -0.08442362, -0.06848314)
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-LCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02LCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-SCD" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02SCD(Jpapbp_1, Jpapbp_2))
print('\n')
message_box(('Computing "Delta E" with "Luo et al. (2006)" "CAM02-UCS" method '
'from given "J\'a\'b\'" arrays:\n'
'\n\t{0}\n\t{1}'.format(Jpapbp_1, Jpapbp_2)))
print(colour.delta_E_CAM02UCS(Jpapbp_1, Jpapbp_2))
|
|
032d4e0d96f74f4b282c1407709b80957f9ee760
|
plugins/modules/dedicated_server_info.py
|
plugins/modules/dedicated_server_info.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_info
short_description: Retrieve all info for an OVH dedicated server
description:
    - This module retrieves all info from an OVH dedicated server
author: Maxime Dupré
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service_name
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_info:
service_name: "{{ service_name }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
try:
result = client.get('/dedicated/server/%s' % (service_name))
module.exit_json(changed=False, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
Add module to fetch server infos.
|
Add module to fetch server infos.
|
Python
|
mit
|
synthesio/infra-ovh-ansible-module
|
Add module to fetch server infos.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_info
short_description: Retrieve all info for an OVH dedicated server
description:
    - This module retrieves all info from an OVH dedicated server
author: Maxime Dupré
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service_name
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_info:
service_name: "{{ service_name }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
try:
result = client.get('/dedicated/server/%s' % (service_name))
module.exit_json(changed=False, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add module to fetch server infos.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_info
short_description: Retrieve all info for an OVH dedicated server
description:
    - This module retrieves all info from an OVH dedicated server
author: Maxime Dupré
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service_name
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_info:
service_name: "{{ service_name }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
try:
result = client.get('/dedicated/server/%s' % (service_name))
module.exit_json(changed=False, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
Add module to fetch server infos.#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_info
short_description: Retrieve all info for an OVH dedicated server
description:
    - This module retrieves all info from an OVH dedicated server
author: Maxime Dupré
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service_name
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_info:
service_name: "{{ service_name }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
try:
result = client.get('/dedicated/server/%s' % (service_name))
module.exit_json(changed=False, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add module to fetch server infos.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: dedicated_server_info
short_description: Retrieve all info for an OVH dedicated server
description:
    - This module retrieves all info from an OVH dedicated server
author: Maxime Dupré
requirements:
- ovh >= 0.5.0
options:
service_name:
required: true
description: The service_name
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_info:
service_name: "{{ service_name }}"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
service_name=dict(required=True)
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
service_name = module.params['service_name']
try:
result = client.get('/dedicated/server/%s' % (service_name))
module.exit_json(changed=False, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
|
809518f5915dece739d02b84146e0b9dacbabc99
|
elections/uk/migrations/0005_add_favourite_biscuits.py
|
elections/uk/migrations/0005_add_favourite_biscuits.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
|
Add Favourite Biscuit as a field for candidates
|
Add Favourite Biscuit as a field for candidates
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add Favourite Biscuit as a field for candidates
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
|
<commit_before><commit_msg>Add Favourite Biscuit as a field for candidates<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
|
Add Favourite Biscuit as a field for candidates# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
|
<commit_before><commit_msg>Add Favourite Biscuit as a field for candidates<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
|
|
26953686515270497cf2361f4a20039603f2f1bd
|
InvenTree/company/migrations/0018_supplierpart_manufacturer.py
|
InvenTree/company/migrations/0018_supplierpart_manufacturer.py
|
# Generated by Django 2.2.10 on 2020-04-13 03:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0017_auto_20200413_0320'),
]
operations = [
migrations.AddField(
model_name='supplierpart',
name='manufacturer',
field=models.ForeignKey(blank=True, help_text='Select manufacturer', limit_choices_to={'is_manufacturer': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='manufactured_parts', to='company.Company'),
),
]
|
Add migration to create a 'manufacturer' field to the SupplierPart model
|
Add migration to create a 'manufacturer' field to the SupplierPart model
(cherry picked from commit 890e938662ed4aff53ea9399b54a86359d23f23f)
|
Python
|
mit
|
inventree/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree
|
Add migration to create a 'manufacturer' field to the SupplierPart model
(cherry picked from commit 890e938662ed4aff53ea9399b54a86359d23f23f)
|
# Generated by Django 2.2.10 on 2020-04-13 03:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0017_auto_20200413_0320'),
]
operations = [
migrations.AddField(
model_name='supplierpart',
name='manufacturer',
field=models.ForeignKey(blank=True, help_text='Select manufacturer', limit_choices_to={'is_manufacturer': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='manufactured_parts', to='company.Company'),
),
]
|
<commit_before><commit_msg>Add migration to create a 'manufacturer' field to the SupplierPart model
(cherry picked from commit 890e938662ed4aff53ea9399b54a86359d23f23f)<commit_after>
|
# Generated by Django 2.2.10 on 2020-04-13 03:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0017_auto_20200413_0320'),
]
operations = [
migrations.AddField(
model_name='supplierpart',
name='manufacturer',
field=models.ForeignKey(blank=True, help_text='Select manufacturer', limit_choices_to={'is_manufacturer': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='manufactured_parts', to='company.Company'),
),
]
|
Add migration to create a 'manufacturer' field to the SupplierPart model
(cherry picked from commit 890e938662ed4aff53ea9399b54a86359d23f23f)# Generated by Django 2.2.10 on 2020-04-13 03:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0017_auto_20200413_0320'),
]
operations = [
migrations.AddField(
model_name='supplierpart',
name='manufacturer',
field=models.ForeignKey(blank=True, help_text='Select manufacturer', limit_choices_to={'is_manufacturer': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='manufactured_parts', to='company.Company'),
),
]
|
<commit_before><commit_msg>Add migration to create a 'manufacturer' field to the SupplierPart model
(cherry picked from commit 890e938662ed4aff53ea9399b54a86359d23f23f)<commit_after># Generated by Django 2.2.10 on 2020-04-13 03:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0017_auto_20200413_0320'),
]
operations = [
migrations.AddField(
model_name='supplierpart',
name='manufacturer',
field=models.ForeignKey(blank=True, help_text='Select manufacturer', limit_choices_to={'is_manufacturer': True}, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='manufactured_parts', to='company.Company'),
),
]
|
|
9deb9f894ec813d13ec0de3a6137a0eb73f4238b
|
python/challenges/30daysofcode/day-00.py
|
python/challenges/30daysofcode/day-00.py
|
# Read a full line of input from stdin and save it to our dynamically typed variable, input_string.
input_string = input()
# Print a string literal saying "Hello, World." to stdout.
print('Hello, World.')
# TODO: Write a line of code here that prints the contents of input_string to stdout.
print(input_string)
|
Add 30 Days of Code Day 0 in Python.
|
Add 30 Days of Code Day 0 in Python.
|
Python
|
mit
|
KoderDojo/hackerrank,KoderDojo/hackerrank
|
Add 30 Days of Code Day 0 in Python.
|
# Read a full line of input from stdin and save it to our dynamically typed variable, input_string.
input_string = input()
# Print a string literal saying "Hello, World." to stdout.
print('Hello, World.')
# TODO: Write a line of code here that prints the contents of input_string to stdout.
print(input_string)
|
<commit_before><commit_msg>Add 30 Days of Code Day 0 in Python.<commit_after>
|
# Read a full line of input from stdin and save it to our dynamically typed variable, input_string.
input_string = input()
# Print a string literal saying "Hello, World." to stdout.
print('Hello, World.')
# TODO: Write a line of code here that prints the contents of input_string to stdout.
print(input_string)
|
Add 30 Days of Code Day 0 in Python.# Read a full line of input from stdin and save it to our dynamically typed variable, input_string.
input_string = input()
# Print a string literal saying "Hello, World." to stdout.
print('Hello, World.')
# TODO: Write a line of code here that prints the contents of input_string to stdout.
print(input_string)
|
<commit_before><commit_msg>Add 30 Days of Code Day 0 in Python.<commit_after># Read a full line of input from stdin and save it to our dynamically typed variable, input_string.
input_string = input()
# Print a string literal saying "Hello, World." to stdout.
print('Hello, World.')
# TODO: Write a line of code here that prints the contents of input_string to stdout.
print(input_string)
|
|
be3259a085e8df17baaad4bcbb1704b0239a3b3b
|
falmer/content/migrations/0041_auto_20180912_1149.py
|
falmer/content/migrations/0041_auto_20180912_1149.py
|
# Generated by Django 2.0.8 on 2018-09-12 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailmenus', '0022_auto_20170913_2125'),
('wagtailforms', '0003_capitalizeverbose'),
('content', '0040_auto_20180911_1600'),
]
operations = [
migrations.RemoveField(
model_name='staffdepartment',
name='page_ptr',
),
migrations.RemoveField(
model_name='staffmember',
name='department',
),
migrations.RemoveField(
model_name='staffmember',
name='photo',
),
migrations.RemoveField(
model_name='staffsection',
name='page_ptr',
),
migrations.AlterModelOptions(
name='staffmembersnippet',
options={'verbose_name': 'Job Role', 'verbose_name_plural': 'Job Roles'},
),
migrations.AlterField(
model_name='kbcategorypage',
name='page_icon',
field=models.FileField(blank=True, default=None, null=True, upload_to=''),
),
migrations.AlterField(
model_name='staffmembersnippet',
name='job_title',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='StaffDepartment',
),
migrations.DeleteModel(
name='StaffMember',
),
migrations.DeleteModel(
name='StaffSection',
),
]
|
Remove old Staff page types
|
Remove old Staff page types
|
Python
|
mit
|
sussexstudent/falmer,sussexstudent/falmer,sussexstudent/falmer,sussexstudent/falmer
|
Remove old Staff page types
|
# Generated by Django 2.0.8 on 2018-09-12 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailmenus', '0022_auto_20170913_2125'),
('wagtailforms', '0003_capitalizeverbose'),
('content', '0040_auto_20180911_1600'),
]
operations = [
migrations.RemoveField(
model_name='staffdepartment',
name='page_ptr',
),
migrations.RemoveField(
model_name='staffmember',
name='department',
),
migrations.RemoveField(
model_name='staffmember',
name='photo',
),
migrations.RemoveField(
model_name='staffsection',
name='page_ptr',
),
migrations.AlterModelOptions(
name='staffmembersnippet',
options={'verbose_name': 'Job Role', 'verbose_name_plural': 'Job Roles'},
),
migrations.AlterField(
model_name='kbcategorypage',
name='page_icon',
field=models.FileField(blank=True, default=None, null=True, upload_to=''),
),
migrations.AlterField(
model_name='staffmembersnippet',
name='job_title',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='StaffDepartment',
),
migrations.DeleteModel(
name='StaffMember',
),
migrations.DeleteModel(
name='StaffSection',
),
]
|
<commit_before><commit_msg>Remove old Staff page types<commit_after>
|
# Generated by Django 2.0.8 on 2018-09-12 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailmenus', '0022_auto_20170913_2125'),
('wagtailforms', '0003_capitalizeverbose'),
('content', '0040_auto_20180911_1600'),
]
operations = [
migrations.RemoveField(
model_name='staffdepartment',
name='page_ptr',
),
migrations.RemoveField(
model_name='staffmember',
name='department',
),
migrations.RemoveField(
model_name='staffmember',
name='photo',
),
migrations.RemoveField(
model_name='staffsection',
name='page_ptr',
),
migrations.AlterModelOptions(
name='staffmembersnippet',
options={'verbose_name': 'Job Role', 'verbose_name_plural': 'Job Roles'},
),
migrations.AlterField(
model_name='kbcategorypage',
name='page_icon',
field=models.FileField(blank=True, default=None, null=True, upload_to=''),
),
migrations.AlterField(
model_name='staffmembersnippet',
name='job_title',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='StaffDepartment',
),
migrations.DeleteModel(
name='StaffMember',
),
migrations.DeleteModel(
name='StaffSection',
),
]
|
Remove old Staff page types# Generated by Django 2.0.8 on 2018-09-12 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailmenus', '0022_auto_20170913_2125'),
('wagtailforms', '0003_capitalizeverbose'),
('content', '0040_auto_20180911_1600'),
]
operations = [
migrations.RemoveField(
model_name='staffdepartment',
name='page_ptr',
),
migrations.RemoveField(
model_name='staffmember',
name='department',
),
migrations.RemoveField(
model_name='staffmember',
name='photo',
),
migrations.RemoveField(
model_name='staffsection',
name='page_ptr',
),
migrations.AlterModelOptions(
name='staffmembersnippet',
options={'verbose_name': 'Job Role', 'verbose_name_plural': 'Job Roles'},
),
migrations.AlterField(
model_name='kbcategorypage',
name='page_icon',
field=models.FileField(blank=True, default=None, null=True, upload_to=''),
),
migrations.AlterField(
model_name='staffmembersnippet',
name='job_title',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='StaffDepartment',
),
migrations.DeleteModel(
name='StaffMember',
),
migrations.DeleteModel(
name='StaffSection',
),
]
|
<commit_before><commit_msg>Remove old Staff page types<commit_after># Generated by Django 2.0.8 on 2018-09-12 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailmenus', '0022_auto_20170913_2125'),
('wagtailforms', '0003_capitalizeverbose'),
('content', '0040_auto_20180911_1600'),
]
operations = [
migrations.RemoveField(
model_name='staffdepartment',
name='page_ptr',
),
migrations.RemoveField(
model_name='staffmember',
name='department',
),
migrations.RemoveField(
model_name='staffmember',
name='photo',
),
migrations.RemoveField(
model_name='staffsection',
name='page_ptr',
),
migrations.AlterModelOptions(
name='staffmembersnippet',
options={'verbose_name': 'Job Role', 'verbose_name_plural': 'Job Roles'},
),
migrations.AlterField(
model_name='kbcategorypage',
name='page_icon',
field=models.FileField(blank=True, default=None, null=True, upload_to=''),
),
migrations.AlterField(
model_name='staffmembersnippet',
name='job_title',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='StaffDepartment',
),
migrations.DeleteModel(
name='StaffMember',
),
migrations.DeleteModel(
name='StaffSection',
),
]
|
|
aeefc9be0ca7405f06ca32e02445f764daa33a84
|
exa/__main__.py
|
exa/__main__.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update, config
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
|
Put config back in thing
|
Put config back in thing
|
Python
|
apache-2.0
|
avmarchenko/exa,alexvmarch/exa,exa-analytics/exa,tjduigna/exa,alexvmarch/exa,alexvmarch/exa,exa-analytics/exa
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
Put config back in thing
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update, config
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
|
<commit_before>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
<commit_msg>Put config back in thing<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update, config
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
Put config back in thing#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update, config
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
|
<commit_before>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
<commit_msg>Put config back in thing<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Executables
########################
Exa provides two executables; "exa" and "exw". For the graphical user interface,
built on top of the Jupyter notebook environment, run "exa" on the command line.
'''
import argparse
import subprocess
from exa._config import set_update, config
def notebook():
'''
Start the exa notebook gui (a Jupyter notebook environment).
'''
subprocess.Popen(['jupyter notebook'], shell=True, cwd=config['paths']['notebooks'])
def workflow(wkflw):
'''
Args:
wkflw: Path to workflow script or instance of workflow class.
'''
raise NotImplementedError('Workflows are currently unsupported.')
def main():
'''
Main entry point for the application.
'''
parser = argparse.ArgumentParser(description=None)
parser.add_argument(
'-u',
'--update',
action='store_true',
help='Update static data and extensions (updates will occur on next import).'
)
parser.add_argument(
'-w',
'--workflow',
type=str,
help='Workflow not implemented',
required=False,
default=None
)
args = parser.parse_args()
if args.update == True:
set_update()
elif args.workflow is None:
notebook()
else:
workflow(args.workflow)
if __name__ == '__main__':
main()
|
3d0f0a1a0664ee8329bacb5732bd36cf198c0f90
|
loafer/route.py
|
loafer/route.py
|
# -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
logger = logging.getLogger(__name__)
class Route(object):
def __init__(self, queue, handler, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._client = boto3.client('sqs')
self.queue_name = queue
self._handler = handler
def __str__(self):
return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)
@cached_property
def queue_url(self):
response = self._client.get_queue_url(QueueName=self.queue_name)
return response['QueueUrl']
@cached_property
def handler(self):
package = '.'.join(self._handler.split('.')[:-1])
name = self._handler.split('.')[-1]
module = importlib.import_module(package)
return getattr(module, name)
async def handle_message(self, message):
if asyncio.iscoroutinefunction(self.handler):
logger.info('Handler is coroutine! {!r}'.format(self.handler))
return await self.handler(message)
else:
logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
return await self._loop.run_in_executor(None, self.handler, message)
async def fetch_messages(self):
fn = partial(self._client.receive_message,
QueueUrl=self.queue_url,
WaitTimeSeconds=5,
MaxNumberOfMessages=10)
response = await self._loop.run_in_executor(None, fn)
return response.get('Messages', [])
|
Add initial Route implementation: responsible to link the queue and the job executor
|
Add initial Route implementation: responsible to link the queue and the job executor
|
Python
|
mit
|
georgeyk/loafer
|
Add initial Route implementation: responsible to link the queue and the job executor
|
# -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
logger = logging.getLogger(__name__)
class Route(object):
def __init__(self, queue, handler, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._client = boto3.client('sqs')
self.queue_name = queue
self._handler = handler
def __str__(self):
return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)
@cached_property
def queue_url(self):
response = self._client.get_queue_url(QueueName=self.queue_name)
return response['QueueUrl']
@cached_property
def handler(self):
package = '.'.join(self._handler.split('.')[:-1])
name = self._handler.split('.')[-1]
module = importlib.import_module(package)
return getattr(module, name)
async def handle_message(self, message):
if asyncio.iscoroutinefunction(self.handler):
logger.info('Handler is coroutine! {!r}'.format(self.handler))
return await self.handler(message)
else:
logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
return await self._loop.run_in_executor(None, self.handler, message)
async def fetch_messages(self):
fn = partial(self._client.receive_message,
QueueUrl=self.queue_url,
WaitTimeSeconds=5,
MaxNumberOfMessages=10)
response = await self._loop.run_in_executor(None, fn)
return response.get('Messages', [])
|
<commit_before><commit_msg>Add initial Route implementation: responsible to link the queue and the job executor<commit_after>
|
# -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
logger = logging.getLogger(__name__)
class Route(object):
def __init__(self, queue, handler, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._client = boto3.client('sqs')
self.queue_name = queue
self._handler = handler
def __str__(self):
return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)
@cached_property
def queue_url(self):
response = self._client.get_queue_url(QueueName=self.queue_name)
return response['QueueUrl']
@cached_property
def handler(self):
package = '.'.join(self._handler.split('.')[:-1])
name = self._handler.split('.')[-1]
module = importlib.import_module(package)
return getattr(module, name)
async def handle_message(self, message):
if asyncio.iscoroutinefunction(self.handler):
logger.info('Handler is coroutine! {!r}'.format(self.handler))
return await self.handler(message)
else:
logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
return await self._loop.run_in_executor(None, self.handler, message)
async def fetch_messages(self):
fn = partial(self._client.receive_message,
QueueUrl=self.queue_url,
WaitTimeSeconds=5,
MaxNumberOfMessages=10)
response = await self._loop.run_in_executor(None, fn)
return response.get('Messages', [])
|
Add initial Route implementation: responsible to link the queue and the job executor# -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
logger = logging.getLogger(__name__)
class Route(object):
def __init__(self, queue, handler, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._client = boto3.client('sqs')
self.queue_name = queue
self._handler = handler
def __str__(self):
return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)
@cached_property
def queue_url(self):
response = self._client.get_queue_url(QueueName=self.queue_name)
return response['QueueUrl']
@cached_property
def handler(self):
package = '.'.join(self._handler.split('.')[:-1])
name = self._handler.split('.')[-1]
module = importlib.import_module(package)
return getattr(module, name)
async def handle_message(self, message):
if asyncio.iscoroutinefunction(self.handler):
logger.info('Handler is coroutine! {!r}'.format(self.handler))
return await self.handler(message)
else:
logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
return await self._loop.run_in_executor(None, self.handler, message)
async def fetch_messages(self):
fn = partial(self._client.receive_message,
QueueUrl=self.queue_url,
WaitTimeSeconds=5,
MaxNumberOfMessages=10)
response = await self._loop.run_in_executor(None, fn)
return response.get('Messages', [])
|
<commit_before><commit_msg>Add initial Route implementation: responsible to link the queue and the job executor<commit_after># -*- coding: utf-8 -*-
import asyncio
from functools import partial
import importlib
import logging
import boto3
from cached_property import cached_property
logger = logging.getLogger(__name__)
class Route(object):
def __init__(self, queue, handler, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._client = boto3.client('sqs')
self.queue_name = queue
self._handler = handler
def __str__(self):
return '<Router(queue={} handler={})>'.format(self.queue_name, self._handler)
@cached_property
def queue_url(self):
response = self._client.get_queue_url(QueueName=self.queue_name)
return response['QueueUrl']
@cached_property
def handler(self):
package = '.'.join(self._handler.split('.')[:-1])
name = self._handler.split('.')[-1]
module = importlib.import_module(package)
return getattr(module, name)
async def handle_message(self, message):
if asyncio.iscoroutinefunction(self.handler):
logger.info('Handler is coroutine! {!r}'.format(self.handler))
return await self.handler(message)
else:
logger.info('Handler will run in a separate thread: {!r}'.format(self.handler))
return await self._loop.run_in_executor(None, self.handler, message)
async def fetch_messages(self):
fn = partial(self._client.receive_message,
QueueUrl=self.queue_url,
WaitTimeSeconds=5,
MaxNumberOfMessages=10)
response = await self._loop.run_in_executor(None, fn)
return response.get('Messages', [])
|
|
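For orientation, a minimal sketch of how the Route class in the record above could be driven from an asyncio event loop. The queue name, handler path, and polling loop are illustrative assumptions, not part of the loafer record.

# Illustrative only: assumes AWS credentials are configured and that
# 'myapp.handlers.process' is an importable callable (hypothetical names).
import asyncio
from loafer.route import Route

async def consume(route):
    # Poll SQS and dispatch each message to the configured handler.
    while True:
        messages = await route.fetch_messages()
        for message in messages:
            await route.handle_message(message)

loop = asyncio.get_event_loop()
route = Route('my-queue', 'myapp.handlers.process', loop=loop)
loop.run_until_complete(consume(route))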
2b01c4145dbd934a8abe2d3cb07e1f5e40d87e24
|
examples/lvm.py
|
examples/lvm.py
|
import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
from blivet.size import Size
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_file(b, "disk1", Size(spec="100GB"))
b.config.diskImages["disk1"] = disk1_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
b.initializeDisk(disk1)
pv = b.newPartition(size=Size(spec="50GB"), fmt_type="lvmpv")
b.createDevice(pv)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
vg = b.newVG(parents=[pv])
b.createDevice(vg)
# new lv with base size 5GB and unbounded growth and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
parents=[vg], name="unbounded")
b.createDevice(dev)
# new lv with base size 5GB and growth up to 15GB and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
maxsize=Size(spec="15GB"), parents=[vg], name="bounded")
b.createDevice(dev)
# new lv with a fixed size of 2GB formatted as swap space
dev = b.newLV(fmt_type="swap", size=Size(spec="2GB"), parents=[vg])
b.createDevice(dev)
# allocate the growable lvs
blivet.partitioning.growLVM(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
|
Add an example of creating lvs using growable requests.
|
Add an example of creating lvs using growable requests.
|
Python
|
lgpl-2.1
|
jkonecny12/blivet,vojtechtrefny/blivet,dwlehman/blivet,AdamWill/blivet,rhinstaller/blivet,vpodzime/blivet,vpodzime/blivet,rvykydal/blivet,dwlehman/blivet,rvykydal/blivet,jkonecny12/blivet,AdamWill/blivet,rhinstaller/blivet,vojtechtrefny/blivet
|
Add an example of creating lvs using growable requests.
|
import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
from blivet.size import Size
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_file(b, "disk1", Size(spec="100GB"))
b.config.diskImages["disk1"] = disk1_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
b.initializeDisk(disk1)
pv = b.newPartition(size=Size(spec="50GB"), fmt_type="lvmpv")
b.createDevice(pv)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
vg = b.newVG(parents=[pv])
b.createDevice(vg)
# new lv with base size 5GB and unbounded growth and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
parents=[vg], name="unbounded")
b.createDevice(dev)
# new lv with base size 5GB and growth up to 15GB and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
maxsize=Size(spec="15GB"), parents=[vg], name="bounded")
b.createDevice(dev)
# new lv with a fixed size of 2GB formatted as swap space
dev = b.newLV(fmt_type="swap", size=Size(spec="2GB"), parents=[vg])
b.createDevice(dev)
# allocate the growable lvs
blivet.partitioning.growLVM(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
|
<commit_before><commit_msg>Add an example of creating lvs using growable requests.<commit_after>
|
import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
from blivet.size import Size
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_file(b, "disk1", Size(spec="100GB"))
b.config.diskImages["disk1"] = disk1_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
b.initializeDisk(disk1)
pv = b.newPartition(size=Size(spec="50GB"), fmt_type="lvmpv")
b.createDevice(pv)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
vg = b.newVG(parents=[pv])
b.createDevice(vg)
# new lv with base size 5GB and unbounded growth and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
parents=[vg], name="unbounded")
b.createDevice(dev)
# new lv with base size 5GB and growth up to 15GB and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
maxsize=Size(spec="15GB"), parents=[vg], name="bounded")
b.createDevice(dev)
# new lv with a fixed size of 2GB formatted as swap space
dev = b.newLV(fmt_type="swap", size=Size(spec="2GB"), parents=[vg])
b.createDevice(dev)
# allocate the growable lvs
blivet.partitioning.growLVM(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
|
Add an example of creating lvs using growable requests.import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
from blivet.size import Size
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_file(b, "disk1", Size(spec="100GB"))
b.config.diskImages["disk1"] = disk1_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
b.initializeDisk(disk1)
pv = b.newPartition(size=Size(spec="50GB"), fmt_type="lvmpv")
b.createDevice(pv)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
vg = b.newVG(parents=[pv])
b.createDevice(vg)
# new lv with base size 5GB and unbounded growth and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
parents=[vg], name="unbounded")
b.createDevice(dev)
# new lv with base size 5GB and growth up to 15GB and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
maxsize=Size(spec="15GB"), parents=[vg], name="bounded")
b.createDevice(dev)
# new lv with a fixed size of 2GB formatted as swap space
dev = b.newLV(fmt_type="swap", size=Size(spec="2GB"), parents=[vg])
b.createDevice(dev)
# allocate the growable lvs
blivet.partitioning.growLVM(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
|
<commit_before><commit_msg>Add an example of creating lvs using growable requests.<commit_after>import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
from blivet.size import Size
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_file(b, "disk1", Size(spec="100GB"))
b.config.diskImages["disk1"] = disk1_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
b.initializeDisk(disk1)
pv = b.newPartition(size=Size(spec="50GB"), fmt_type="lvmpv")
b.createDevice(pv)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
vg = b.newVG(parents=[pv])
b.createDevice(vg)
# new lv with base size 5GB and unbounded growth and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
parents=[vg], name="unbounded")
b.createDevice(dev)
# new lv with base size 5GB and growth up to 15GB and an ext4 filesystem
dev = b.newLV(fmt_type="ext4", size=Size(spec="5GB"), grow=True,
maxsize=Size(spec="15GB"), parents=[vg], name="bounded")
b.createDevice(dev)
# new lv with a fixed size of 2GB formatted as swap space
dev = b.newLV(fmt_type="swap", size=Size(spec="2GB"), parents=[vg])
b.createDevice(dev)
# allocate the growable lvs
blivet.partitioning.growLVM(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
|
|
fadb99ce3a93b2e4be7a654277c921fb5ed562ad
|
replace-jars.py
|
replace-jars.py
|
#!/usr/bin/env python
import os
import re
import shutil
import sys
""" Automate updating multiple HDP jars for debugging/hotfix purposes.
Agnostic to the target directory layout which can differ across HDP versions.
"""
if (len(sys.argv) != 4):
print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>")
print(" source-dir : Directory containing the new jar versions.")
print(" source-version : Version string of the new jars.")
print(" dst-version : Installed HDP version to be updated.")
sys.exit(1)
# Strip out the first three digits which are the Apache version
# from dst_ver.
#
src, src_ver, dst_ver = sys.argv[1:]
ver_pattern = re.compile('^\d+\.\d+\.\d+\.')
dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver)
# Sanity checks.
#
if not os.path.isdir(dst):
print("Directory {} does not exist".format(dst))
sys.exit(1)
if not os.path.isdir(src):
print("Directory {} does not exist".format(src))
sys.exit(1)
# Build a map of source jar name to its full path under
# the source directory.
#
sources = {}
for root, dirs, files in os.walk(src):
for f in files:
if f.endswith('.jar') and f not in sources:
sources[f] = os.path.join(root, f)
print("Got {} source jars.".format(len(sources)))
# List destination jars, and replace each with the corresponding
# source jar.
# TODO: Create a backup of the jars being replaced.
#
jars_replaced = 0
for root, dirs, files in os.walk(dst):
for f in files:
if f.endswith('.jar') and f.startswith('hadoop'):
dest = os.path.join(root, f)
src_jar_name = f.replace(dst_ver, src_ver, 1)
if src_jar_name in sources and os.path.isfile(dest):
print("{} -> {}".format(dest, sources[src_jar_name]))
shutil.copy2(sources[src_jar_name], dest)
jars_replaced += 1
print("Replaced {} jars.".format(jars_replaced))
|
Add script to automate replacing HDP jars.
|
Add script to automate replacing HDP jars.
|
Python
|
apache-2.0
|
arp7/HadoopTools,arp7/HadoopTools,arp7/HadoopTools
|
Add script to automate replacing HDP jars.
|
#!/usr/bin/env python
import os
import re
import shutil
import sys
""" Automate updating multiple HDP jars for debugging/hotfix purposes.
Agnostic to the target directory layout which can differ across HDP versions.
"""
if (len(sys.argv) != 4):
print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>")
print(" source-dir : Directory containing the new jar versions.")
print(" source-version : Version string of the new jars.")
print(" dst-version : Installed HDP version to be updated.")
sys.exit(1)
# Strip out the first three digits which are the Apache version
# from dst_ver.
#
src, src_ver, dst_ver = sys.argv[1:]
ver_pattern = re.compile('^\d+\.\d+\.\d+\.')
dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver)
# Sanity checks.
#
if not os.path.isdir(dst):
print("Directory {} does not exist".format(dst))
sys.exit(1)
if not os.path.isdir(src):
print("Directory {} does not exist".format(src))
sys.exit(1)
# Build a map of source jar name to its full path under
# the source directory.
#
sources = {}
for root, dirs, files in os.walk(src):
for f in files:
if f.endswith('.jar') and f not in sources:
sources[f] = os.path.join(root, f)
print("Got {} source jars.".format(len(sources)))
# List destination jars, and replace each with the corresponding
# source jar.
# TODO: Create a backup of the jars being replaced.
#
jars_replaced = 0
for root, dirs, files in os.walk(dst):
for f in files:
if f.endswith('.jar') and f.startswith('hadoop'):
dest = os.path.join(root, f)
src_jar_name = f.replace(dst_ver, src_ver, 1)
if src_jar_name in sources and os.path.isfile(dest):
print("{} -> {}".format(dest, sources[src_jar_name]))
shutil.copy2(sources[src_jar_name], dest)
jars_replaced += 1
print("Replaced {} jars.".format(jars_replaced))
|
<commit_before><commit_msg>Add script to automate replacing HDP jars.<commit_after>
|
#!/usr/bin/env python
import os
import re
import shutil
import sys
""" Automate updating multiple HDP jars for debugging/hotfix purposes.
Agnostic to the target directory layout which can differ across HDP versions.
"""
if (len(sys.argv) != 4):
print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>")
print(" source-dir : Directory containing the new jar versions.")
print(" source-version : Version string of the new jars.")
print(" dst-version : Installed HDP version to be updated.")
sys.exit(1)
# Strip out the first three digits which are the Apache version
# from dst_ver.
#
src, src_ver, dst_ver = sys.argv[1:]
ver_pattern = re.compile('^\d+\.\d+\.\d+\.')
dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver)
# Sanity checks.
#
if not os.path.isdir(dst):
print("Directory {} does not exist".format(dst))
sys.exit(1)
if not os.path.isdir(src):
print("Directory {} does not exist".format(src))
sys.exit(1)
# Build a map of source jar name to its full path under
# the source directory.
#
sources = {}
for root, dirs, files in os.walk(src):
for f in files:
if f.endswith('.jar') and f not in sources:
sources[f] = os.path.join(root, f)
print("Got {} source jars.".format(len(sources)))
# List destination jars, and replace each with the corresponding
# source jar.
# TODO: Create a backup of the jars being replaced.
#
jars_replaced = 0
for root, dirs, files in os.walk(dst):
for f in files:
if f.endswith('.jar') and f.startswith('hadoop'):
dest = os.path.join(root, f)
src_jar_name = f.replace(dst_ver, src_ver, 1)
if src_jar_name in sources and os.path.isfile(dest):
print("{} -> {}".format(dest, sources[src_jar_name]))
shutil.copy2(sources[src_jar_name], dest)
jars_replaced += 1
print("Replaced {} jars.".format(jars_replaced))
|
Add script to automate replacing HDP jars.#!/usr/bin/env python
import os
import re
import shutil
import sys
""" Automate updating multiple HDP jars for debugging/hotfix purposes.
Agnostic to the target directory layout which can differ across HDP versions.
"""
if (len(sys.argv) != 4):
print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>")
print(" source-dir : Directory containing the new jar versions.")
print(" source-version : Version string of the new jars.")
print(" dst-version : Installed HDP version to be updated.")
sys.exit(1)
# Strip out the first three digits which are the Apache version
# from dst_ver.
#
src, src_ver, dst_ver = sys.argv[1:]
ver_pattern = re.compile('^\d+\.\d+\.\d+\.')
dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver)
# Sanity checks.
#
if not os.path.isdir(dst):
print("Directory {} does not exist".format(dst))
sys.exit(1)
if not os.path.isdir(src):
print("Directory {} does not exist".format(src))
sys.exit(1)
# Build a map of source jar name to its full path under
# the source directory.
#
sources = {}
for root, dirs, files in os.walk(src):
for f in files:
if f.endswith('.jar') and f not in sources:
sources[f] = os.path.join(root, f)
print("Got {} source jars.".format(len(sources)))
# List destination jars, and replace each with the corresponding
# source jar.
# TODO: Create a backup of the jars being replaced.
#
jars_replaced = 0
for root, dirs, files in os.walk(dst):
for f in files:
if f.endswith('.jar') and f.startswith('hadoop'):
dest = os.path.join(root, f)
src_jar_name = f.replace(dst_ver, src_ver, 1)
if src_jar_name in sources and os.path.isfile(dest):
print("{} -> {}".format(dest, sources[src_jar_name]))
shutil.copy2(sources[src_jar_name], dest)
jars_replaced += 1
print("Replaced {} jars.".format(jars_replaced))
|
<commit_before><commit_msg>Add script to automate replacing HDP jars.<commit_after>#!/usr/bin/env python
import os
import re
import shutil
import sys
""" Automate updating multiple HDP jars for debugging/hotfix purposes.
Agnostic to the target directory layout which can differ across HDP versions.
"""
if (len(sys.argv) != 4):
print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>")
print(" source-dir : Directory containing the new jar versions.")
print(" source-version : Version string of the new jars.")
print(" dst-version : Installed HDP version to be updated.")
sys.exit(1)
# Strip out the first three digits which are the Apache version
# from dst_ver.
#
src, src_ver, dst_ver = sys.argv[1:]
ver_pattern = re.compile('^\d+\.\d+\.\d+\.')
dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver)
# Sanity checks.
#
if not os.path.isdir(dst):
print("Directory {} does not exist".format(dst))
sys.exit(1)
if not os.path.isdir(src):
print("Directory {} does not exist".format(src))
sys.exit(1)
# Build a map of source jar name to its full path under
# the source directory.
#
sources = {}
for root, dirs, files in os.walk(src):
for f in files:
if f.endswith('.jar') and f not in sources:
sources[f] = os.path.join(root, f)
print("Got {} source jars.".format(len(sources)))
# List destination jars, and replace each with the corresponding
# source jar.
# TODO: Create a backup of the jars being replaced.
#
jars_replaced = 0
for root, dirs, files in os.walk(dst):
for f in files:
if f.endswith('.jar') and f.startswith('hadoop'):
dest = os.path.join(root, f)
src_jar_name = f.replace(dst_ver, src_ver, 1)
if src_jar_name in sources and os.path.isfile(dest):
print("{} -> {}".format(dest, sources[src_jar_name]))
shutil.copy2(sources[src_jar_name], dest)
jars_replaced += 1
print("Replaced {} jars.".format(jars_replaced))
|
|
1de28842f7c8dd7c7553b06ed618152c7d144ba0
|
keras_fcn/callbacks.py
|
keras_fcn/callbacks.py
|
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.callbacks import Callback
class CheckNumericsOps(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self, validation_data, batch_size=1, histogram_freq=1):
super(CheckNumericsOps, self).__init__()
self.check_num = None
self.batch_size = batch_size
self.histogram_freq = histogram_freq
self.validation_data = validation_data
def set_model(self, model):
self.model = model
self.sess = K.get_session()
self.check_num = tf.add_check_numerics_ops()
def on_batch_end(self, batch, logs=None):
if self.validation_data and self.histogram_freq:
if batch % self.histogram_freq == 0:
for layer in self.model.layers:
functor = K.function([self.model.input, K.learning_phase()], [layer.output])
layer_out = functor(self.validation_data)
if np.any(np.isnan(layer_out)) or np.any(np.isinf(layer_out)):
print('The output of {} becomes nan'.format(layer.name))
self.model.stop_training = True
|
Add NaN output detection callback
|
Add NaN output detection callback
|
Python
|
mit
|
JihongJu/keras-fcn
|
Add NaN output detection callback
|
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.callbacks import Callback
class CheckNumericsOps(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self, validation_data, batch_size=1, histogram_freq=1):
super(CheckNumericsOps, self).__init__()
self.check_num = None
self.batch_size = batch_size
self.histogram_freq = histogram_freq
self.validation_data = validation_data
def set_model(self, model):
self.model = model
self.sess = K.get_session()
self.check_num = tf.add_check_numerics_ops()
def on_batch_end(self, batch, logs=None):
if self.validation_data and self.histogram_freq:
if batch % self.histogram_freq == 0:
for layer in self.model.layers:
functor = K.function([self.model.input, K.learning_phase()], [layer.output])
layer_out = functor(self.validation_data)
if np.any(np.isnan(layer_out)) or np.any(np.isinf(layer_out)):
print('The output of {} becomes nan'.format(layer.name))
self.model.stop_training = True
|
<commit_before><commit_msg>Add NaN output detection callback<commit_after>
|
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.callbacks import Callback
class CheckNumericsOps(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self, validation_data, batch_size=1, histogram_freq=1):
super(CheckNumericsOps, self).__init__()
self.check_num = None
self.batch_size = batch_size
self.histogram_freq = histogram_freq
self.validation_data = validation_data
def set_model(self, model):
self.model = model
self.sess = K.get_session()
self.check_num = tf.add_check_numerics_ops()
def on_batch_end(self, batch, logs=None):
if self.validation_data and self.histogram_freq:
if batch % self.histogram_freq == 0:
for layer in self.model.layers:
functor = K.function([self.model.input, K.learning_phase()], [layer.output])
layer_out = functor(self.validation_data)
if np.any(np.isnan(layer_out)) or np.any(np.isinf(layer_out)):
print('The output of {} becomes nan'.format(layer.name))
self.model.stop_training = True
|
Add NaN output detection callbackimport numpy as np
import tensorflow as tf
import keras.backend as K
from keras.callbacks import Callback
class CheckNumericsOps(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self, validation_data, batch_size=1, histogram_freq=1):
super(CheckNumericsOps, self).__init__()
self.check_num = None
self.batch_size = batch_size
self.histogram_freq = histogram_freq
self.validation_data = validation_data
def set_model(self, model):
self.model = model
self.sess = K.get_session()
self.check_num = tf.add_check_numerics_ops()
def on_batch_end(self, batch, logs=None):
if self.validation_data and self.histogram_freq:
if batch % self.histogram_freq == 0:
for layer in self.model.layers:
functor = K.function([self.model.input, K.learning_phase()], [layer.output])
layer_out = functor(self.validation_data)
if np.any(np.isnan(layer_out)) or np.any(np.isinf(layer_out)):
print('The output of {} becomes nan'.format(layer.name))
self.model.stop_training = True
|
<commit_before><commit_msg>Add NaN output detection callback<commit_after>import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.callbacks import Callback
class CheckNumericsOps(Callback):
"""Callback that terminates training when a NaN loss is encountered."""
def __init__(self, validation_data, batch_size=1, histogram_freq=1):
super(CheckNumericsOps, self).__init__()
self.check_num = None
self.batch_size = batch_size
self.histogram_freq = histogram_freq
self.validation_data = validation_data
def set_model(self, model):
self.model = model
self.sess = K.get_session()
self.check_num = tf.add_check_numerics_ops()
def on_batch_end(self, batch, logs=None):
if self.validation_data and self.histogram_freq:
if batch % self.histogram_freq == 0:
for layer in self.model.layers:
functor = K.function([self.model.input, K.learning_phase()], [layer.output])
layer_out = functor(self.validation_data)
if np.any(np.isnan(layer_out)) or np.any(np.isinf(layer_out)):
print('The output of {} becomes nan'.format(layer.name))
self.model.stop_training = True
|
|
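A brief, hedged sketch of attaching the CheckNumericsOps callback from the keras_fcn record to a Keras training run. The model, data shapes, and histogram frequency are placeholder assumptions; validation_data is passed in the [inputs, learning_phase] form that the callback's K.function call expects.

# Illustrative only: model and data are dummies.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras_fcn.callbacks import CheckNumericsOps

model = Sequential([Dense(8, activation='relu', input_shape=(4,)), Dense(1)])
model.compile(optimizer='sgd', loss='mse')

x_val = np.random.rand(16, 4)
# learning_phase=0 -> inference mode when the callback probes layer outputs
check_nan = CheckNumericsOps(validation_data=[x_val, 0], histogram_freq=10)

x_train = np.random.rand(64, 4)
y_train = np.random.rand(64, 1)
model.fit(x_train, y_train, batch_size=8, epochs=2, callbacks=[check_nan])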
5c606b2a5628ead8724344252913668c82db9c22
|
orlo/migrations/0868747e62ff_add_unique_constraints.py
|
orlo/migrations/0868747e62ff_add_unique_constraints.py
|
"""Add unique constraints
Revision ID: 0868747e62ff
Revises: e60a77e44da8
Create Date: 2017-04-11 16:10:42.109777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0868747e62ff'
down_revision = 'e60a77e44da8'
branch_labels = ()
depends_on = None
def upgrade():
op.create_unique_constraint(None, 'package', ['id'])
op.create_unique_constraint(None, 'package_result', ['id'])
op.create_unique_constraint(None, 'platform', ['id'])
op.create_unique_constraint(None, 'release', ['id'])
op.create_unique_constraint(None, 'release_metadata', ['id'])
op.create_unique_constraint(None, 'release_note', ['id'])
def downgrade():
op.drop_constraint(None, 'release_note', type_='unique')
op.drop_constraint(None, 'release_metadata', type_='unique')
op.drop_constraint(None, 'release', type_='unique')
op.drop_constraint(None, 'platform', type_='unique')
op.drop_constraint(None, 'package_result', type_='unique')
op.drop_constraint(None, 'package', type_='unique')
|
Add alembic migration: "add unique constraints"
|
Add alembic migration: "add unique constraints"
|
Python
|
mit
|
al4/sponge,eBayClassifiedsGroup/sponge,al4/orlo,al4/orlo,eBayClassifiedsGroup/sponge,al4/sponge,eBayClassifiedsGroup/sponge,al4/sponge,eBayClassifiedsGroup/orlo,eBayClassifiedsGroup/orlo
|
Add alembic migration: "add unique constraints"
|
"""Add unique constraints
Revision ID: 0868747e62ff
Revises: e60a77e44da8
Create Date: 2017-04-11 16:10:42.109777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0868747e62ff'
down_revision = 'e60a77e44da8'
branch_labels = ()
depends_on = None
def upgrade():
op.create_unique_constraint(None, 'package', ['id'])
op.create_unique_constraint(None, 'package_result', ['id'])
op.create_unique_constraint(None, 'platform', ['id'])
op.create_unique_constraint(None, 'release', ['id'])
op.create_unique_constraint(None, 'release_metadata', ['id'])
op.create_unique_constraint(None, 'release_note', ['id'])
def downgrade():
op.drop_constraint(None, 'release_note', type_='unique')
op.drop_constraint(None, 'release_metadata', type_='unique')
op.drop_constraint(None, 'release', type_='unique')
op.drop_constraint(None, 'platform', type_='unique')
op.drop_constraint(None, 'package_result', type_='unique')
op.drop_constraint(None, 'package', type_='unique')
|
<commit_before><commit_msg>Add alembic migration: "add unique constraints"<commit_after>
|
"""Add unique constraints
Revision ID: 0868747e62ff
Revises: e60a77e44da8
Create Date: 2017-04-11 16:10:42.109777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0868747e62ff'
down_revision = 'e60a77e44da8'
branch_labels = ()
depends_on = None
def upgrade():
op.create_unique_constraint(None, 'package', ['id'])
op.create_unique_constraint(None, 'package_result', ['id'])
op.create_unique_constraint(None, 'platform', ['id'])
op.create_unique_constraint(None, 'release', ['id'])
op.create_unique_constraint(None, 'release_metadata', ['id'])
op.create_unique_constraint(None, 'release_note', ['id'])
def downgrade():
op.drop_constraint(None, 'release_note', type_='unique')
op.drop_constraint(None, 'release_metadata', type_='unique')
op.drop_constraint(None, 'release', type_='unique')
op.drop_constraint(None, 'platform', type_='unique')
op.drop_constraint(None, 'package_result', type_='unique')
op.drop_constraint(None, 'package', type_='unique')
|
Add alembic migration: "add unique constraints""""Add unique constraints
Revision ID: 0868747e62ff
Revises: e60a77e44da8
Create Date: 2017-04-11 16:10:42.109777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0868747e62ff'
down_revision = 'e60a77e44da8'
branch_labels = ()
depends_on = None
def upgrade():
op.create_unique_constraint(None, 'package', ['id'])
op.create_unique_constraint(None, 'package_result', ['id'])
op.create_unique_constraint(None, 'platform', ['id'])
op.create_unique_constraint(None, 'release', ['id'])
op.create_unique_constraint(None, 'release_metadata', ['id'])
op.create_unique_constraint(None, 'release_note', ['id'])
def downgrade():
op.drop_constraint(None, 'release_note', type_='unique')
op.drop_constraint(None, 'release_metadata', type_='unique')
op.drop_constraint(None, 'release', type_='unique')
op.drop_constraint(None, 'platform', type_='unique')
op.drop_constraint(None, 'package_result', type_='unique')
op.drop_constraint(None, 'package', type_='unique')
|
<commit_before><commit_msg>Add alembic migration: "add unique constraints"<commit_after>"""Add unique constraints
Revision ID: 0868747e62ff
Revises: e60a77e44da8
Create Date: 2017-04-11 16:10:42.109777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0868747e62ff'
down_revision = 'e60a77e44da8'
branch_labels = ()
depends_on = None
def upgrade():
op.create_unique_constraint(None, 'package', ['id'])
op.create_unique_constraint(None, 'package_result', ['id'])
op.create_unique_constraint(None, 'platform', ['id'])
op.create_unique_constraint(None, 'release', ['id'])
op.create_unique_constraint(None, 'release_metadata', ['id'])
op.create_unique_constraint(None, 'release_note', ['id'])
def downgrade():
op.drop_constraint(None, 'release_note', type_='unique')
op.drop_constraint(None, 'release_metadata', type_='unique')
op.drop_constraint(None, 'release', type_='unique')
op.drop_constraint(None, 'platform', type_='unique')
op.drop_constraint(None, 'package_result', type_='unique')
op.drop_constraint(None, 'package', type_='unique')
|
|
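For context, a sketch of applying a revision like the one in the record above through Alembic's Python API; the ini path and database URL are assumptions about an orlo deployment, not taken from the record.

# Illustrative only: paths and URLs are placeholders for a real orlo setup.
from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")  # assumed to point at orlo/migrations
cfg.set_main_option("sqlalchemy.url", "postgresql://localhost/orlo")

command.upgrade(cfg, "0868747e62ff")     # apply the unique-constraint revision
# command.downgrade(cfg, "e60a77e44da8") # revert to the prior revision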
93998f2165daa1fd85b855f4ebfd258acea6cf9a
|
test/io/test_count_table_npz.py
|
test/io/test_count_table_npz.py
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 31/01/19
content: Test I/O of sparse counts table via npz files.
'''
import os
def test_parse__save_npz():
print('Parsing example NPZ count table')
from singlet.io import parse_counts_table_sparse
table = parse_counts_table_sparse({'countsname': 'example_PBMC_sparse'})
print('Done!')
print('Saving NPZ count table')
from singlet.io.npz import to_counts_table_sparse
to_counts_table_sparse(table, 'example_data/example_PBMC_sparse_backup.npz')
os.remove('example_data/example_PBMC_sparse_backup.npz')
print('Done!')
|
Cover tests for npz i/o
|
Cover tests for npz i/o
|
Python
|
mit
|
iosonofabio/singlet,iosonofabio/singlet
|
Cover tests for npz i/o
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 31/01/19
content: Test I/O of sparse counts table via npz files.
'''
import os
def test_parse__save_npz():
print('Parsing example NPZ count table')
from singlet.io import parse_counts_table_sparse
table = parse_counts_table_sparse({'countsname': 'example_PBMC_sparse'})
print('Done!')
print('Saving NPZ count table')
from singlet.io.npz import to_counts_table_sparse
to_counts_table_sparse(table, 'example_data/example_PBMC_sparse_backup.npz')
os.remove('example_data/example_PBMC_sparse_backup.npz')
print('Done!')
|
<commit_before><commit_msg>Cover tests for npz i/o<commit_after>
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 31/01/19
content: Test I/O of sparse counts table via npz files.
'''
import os
def test_parse__save_npz():
print('Parsing example NPZ count table')
from singlet.io import parse_counts_table_sparse
table = parse_counts_table_sparse({'countsname': 'example_PBMC_sparse'})
print('Done!')
print('Saving NPZ count table')
from singlet.io.npz import to_counts_table_sparse
to_counts_table_sparse(table, 'example_data/example_PBMC_sparse_backup.npz')
os.remove('example_data/example_PBMC_sparse_backup.npz')
print('Done!')
|
Cover tests for npz i/o# vim: fdm=indent
'''
author: Fabio Zanini
date: 31/01/19
content: Test I/O of sparse counts table via npz files.
'''
import os
def test_parse__save_npz():
print('Parsing example NPZ count table')
from singlet.io import parse_counts_table_sparse
table = parse_counts_table_sparse({'countsname': 'example_PBMC_sparse'})
print('Done!')
print('Saving NPZ count table')
from singlet.io.npz import to_counts_table_sparse
to_counts_table_sparse(table, 'example_data/example_PBMC_sparse_backup.npz')
os.remove('example_data/example_PBMC_sparse_backup.npz')
print('Done!')
|
<commit_before><commit_msg>Cover tests for npz i/o<commit_after># vim: fdm=indent
'''
author: Fabio Zanini
date: 31/01/19
content: Test I/O of sparse counts table via npz files.
'''
import os
def test_parse__save_npz():
print('Parsing example NPZ count table')
from singlet.io import parse_counts_table_sparse
table = parse_counts_table_sparse({'countsname': 'example_PBMC_sparse'})
print('Done!')
print('Saving NPZ count table')
from singlet.io.npz import to_counts_table_sparse
to_counts_table_sparse(table, 'example_data/example_PBMC_sparse_backup.npz')
os.remove('example_data/example_PBMC_sparse_backup.npz')
print('Done!')
|